Skip to main content

RuntimeClasses

É um recurso do Kubernetes que permite que o kubelet execute pods com diferentes runtimes. Uma vez criada a RuntimeClass, é possível especificar no pod um runtime diferente do padrão.

Um runtime class é somente um nome que dirá quem é o handler que será utilizado para manipular o pod.

Vamos criar um runtime class para o gvisor. Runtime Classes estão em nível de cluster.

root@cks-master:~# vim rc.yaml

root@cks-master:~# cat rc.yaml

apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor
handler: runsc

root@cks-master:~# k apply -f rc.yaml
runtimeclass.node.k8s.io/gvisor created

root@cks-master:~# k get runtimeclasses.node.k8s.io
NAME HANDLER AGE
gvisor runsc 12s

Agora vamos criar um pod apontando um runtime que não existe e depois um que existe mas que não está presente no node.

root@cks-master:~# k get pods
No resources found in default namespace.

root@cks-master:~# k run pod --image=nginx -oyaml --dry-run=client > podrc.yaml

root@cks-master:~# vim podrc.yaml

root@cks-master:~# cat podrc.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod
  name: pod
spec:
  runtimeClassName: teste # Não existe
  containers:
  - image: nginx
    name: pod
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

# Não é possível criar se o runtime não existir
root@cks-master:~# k apply -f podrc.yaml
Error from server (Forbidden): error when creating "podrc.yaml": pods "pod" is forbidden: pod rejected: RuntimeClass "teste" not found

# Editando e colocando um que existe, mas que não está presente no node.
root@cks-master:~# vim podrc.yaml

root@cks-master:~# cat podrc.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod
  name: pod
spec:
  runtimeClassName: gvisor #<<<<<
  containers:
  - image: nginx
    name: pod
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

root@cks-master:~# k apply -f podrc.yaml
pod/pod created

# Foi agendado para o worker, mas está sempre esperando ser criado, pois não temos o runsc disponível lá.
root@cks-master:~# k get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod 0/1 ContainerCreating 0 15s <none> cks-worker <none> <none>

root@cks-master:~# k describe pod pod
Name: pod
Namespace: default
Priority: 0
Runtime Class Name: gvisor
Service Account: default
Node: cks-worker/10.128.0.4
Start Time: Wed, 28 Aug 2024 14:36:34 +0000
Labels: run=pod
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Containers:
pod:
Container ID:
Image: nginx
Image ID:
Port: <none>
Host Port: <none>
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-bjk49 (ro)
Conditions:
Type Status
PodReadyToStartContainers False
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-bjk49:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 3m41s default-scheduler Successfully assigned default/pod to cks-worker
Warning FailedCreatePodSandBox 4s (x18 over 3m40s) kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to get sandbox runtime: no runtime for "runsc" is configured # <<<< Não encontrou o runtime.

Vamos instalar o gVisor no nosso worker node. Abaixo o script de instalação.

#!/usr/bin/env bash
# Install gVisor (runsc) and its containerd shim on a worker node, then
# register the "runsc" runtime handler with containerd and restart it.
# IF THIS FAILS then you can try to change the URL= further down from latest back to a specific release
# https://gvisor.dev/docs/user_guide/install

# Fail fast: abort on any error, on unset variables, and on pipeline failures.
set -euo pipefail

# Prerequisite packages for fetching/verifying the binaries.
sudo apt-get update && \
sudo apt-get install -y \
  apt-transport-https \
  ca-certificates \
  curl \
  gnupg-agent \
  software-properties-common

# Download runsc + containerd shim from the official release bucket.
# Runs in a subshell so a mid-download failure doesn't leave us half-installed.
(
  set -e
  ARCH=$(uname -m)
  # URL=https://storage.googleapis.com/gvisor/releases/release/20210806/${ARCH}
  URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}"
  wget "${URL}/runsc" "${URL}/runsc.sha512" \
    "${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512"
  # Verify checksums BEFORE moving anything into /usr/local/bin.
  sha512sum -c runsc.sha512 \
    -c containerd-shim-runsc-v1.sha512
  rm -f -- *.sha512
  chmod a+rx runsc containerd-shim-runsc-v1
  sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
)

# containerd: enable runsc.
# We add a runtime plugin entry so containerd also knows how to run pods with runsc:
#
#   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
#     runtime_type = "io.containerd.runsc.v1"
#
# NOTE: this OVERWRITES any existing /etc/containerd/config.toml.
# Written via `sudo tee` (instead of a bare `>` redirect) so the script also
# works when launched by a non-root user — a plain redirect is performed by the
# unprivileged shell and would fail with "Permission denied".
sudo tee /etc/containerd/config.toml >/dev/null <<EOF
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
version = 2

[plugins]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runsc]
runtime_type = "io.containerd.runsc.v1"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
EOF

# Reload containerd so the new runsc runtime handler is picked up.
sudo systemctl restart containerd

Vamos rodar isso.

root@cks-worker:~# vim installgvisor.sh
root@cks-worker:~# sh installgvisor.sh
Hit:1 http://us-central1.gce.archive.ubuntu.com/ubuntu focal InRelease
Get:2 http://us-central1.gce.archive.ubuntu.com/ubuntu focal-updates InRelease [128 kB]
Hit:3 http://us-central1.gce.archive.ubuntu.com/ubuntu focal-backports InRelease
Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease
Hit:4 https://prod-cdn.packages.k8s.io/repositories/isv:/kubernetes:/core:/stable:/v1.28/deb InRelease
Hit:6 https://prod-cdn.packages.k8s.io/repositories/isv:/kubernetes:/core:/stable:/v1.29/deb InRelease
Hit:7 https://prod-cdn.packages.k8s.io/repositories/isv:/kubernetes:/core:/stable:/v1.30/deb InRelease
Hit:8 https://prod-cdn.packages.k8s.io/repositories/isv:/kubernetes:/core:/stable:/v1.31/deb InRelease
Get:9 http://us-central1.gce.archive.ubuntu.com/ubuntu focal-updates/main amd64 Packages [3536 kB]
Get:10 http://us-central1.gce.archive.ubuntu.com/ubuntu focal-updates/universe amd64 Packages [1220 kB]
Fetched 4884 kB in 2s (2777 kB/s)
Reading package lists... Done
Reading package lists... Done
Building dependency tree
Reading state information... Done
ca-certificates is already the newest version (20230311ubuntu0.20.04.1).
curl is already the newest version (7.68.0-1ubuntu2.23).
curl set to manually installed.
software-properties-common is already the newest version (0.99.9.12).
software-properties-common set to manually installed.
apt-transport-https is already the newest version (2.0.10).
The following NEW packages will be installed:
gnupg-agent
0 upgraded, 1 newly installed, 0 to remove and 12 not upgraded.
Need to get 5240 B of archives.
After this operation, 46.1 kB of additional disk space will be used.
Get:1 http://us-central1.gce.archive.ubuntu.com/ubuntu focal-updates/universe amd64 gnupg-agent all 2.2.19-3ubuntu2.2 [5240 B]
Fetched 5240 B in 0s (156 kB/s)
Selecting previously unselected package gnupg-agent.
(Reading database ... 92983 files and directories currently installed.)
Preparing to unpack .../gnupg-agent_2.2.19-3ubuntu2.2_all.deb ...
Unpacking gnupg-agent (2.2.19-3ubuntu2.2) ...
Setting up gnupg-agent (2.2.19-3ubuntu2.2) ...
--2024-08-28 15:36:47-- https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/runsc
Resolving storage.googleapis.com (storage.googleapis.com)... 173.194.206.207, 74.125.126.207, 74.125.132.207, ...
Connecting to storage.googleapis.com (storage.googleapis.com)|173.194.206.207|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 66330190 (63M) [application/octet-stream]
Saving to: ‘runsc’

runsc 100%[=========================================================================================>] 63.26M 177MB/s in 0.4s

2024-08-28 15:36:48 (177 MB/s) - ‘runsc’ saved [66330190/66330190]

--2024-08-28 15:36:48-- https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/runsc.sha512
Reusing existing connection to storage.googleapis.com:443.
HTTP request sent, awaiting response... 200 OK
Length: 136 [application/octet-stream]
Saving to: ‘runsc.sha512’

runsc.sha512 100%[=========================================================================================>] 136 --.-KB/s in 0s

2024-08-28 15:36:48 (11.9 MB/s) - ‘runsc.sha512’ saved [136/136]

--2024-08-28 15:36:48-- https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/containerd-shim-runsc-v1
Reusing existing connection to storage.googleapis.com:443.
HTTP request sent, awaiting response... 200 OK
Length: 28970030 (28M) [application/octet-stream]
Saving to: ‘containerd-shim-runsc-v1’

containerd-shim-runsc-v1 100%[=========================================================================================>] 27.63M 135MB/s in 0.2s

2024-08-28 15:36:48 (135 MB/s) - ‘containerd-shim-runsc-v1’ saved [28970030/28970030]

--2024-08-28 15:36:48-- https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/containerd-shim-runsc-v1.sha512
Reusing existing connection to storage.googleapis.com:443.
HTTP request sent, awaiting response... 200 OK
Length: 155 [application/octet-stream]
Saving to: ‘containerd-shim-runsc-v1.sha512’

containerd-shim-runsc-v1.sha512 100%[=========================================================================================>] 155 --.-KB/s in 0s

2024-08-28 15:36:48 (12.1 MB/s) - ‘containerd-shim-runsc-v1.sha512’ saved [155/155]

FINISHED --2024-08-28 15:36:48--
Total wall clock time: 0.9s
Downloaded: 4 files, 91M in 0.6s (162 MB/s)
runsc: OK
containerd-shim-runsc-v1: OK
root@cks-worker:~# systemctl status containerd | grep Active
Active: active (running) since Wed 2024-08-28 15:36:49 UTC; 8s ago
root@cks-worker:~# systemctl status kubelet | grep Active
Active: active (running) since Tue 2024-08-27 13:31:32 UTC; 1 day 2h ago

E agora se observamos no master o pod.

root@cks-master:~# k get pod
NAME READY STATUS RESTARTS AGE
pod 1/1 Running 0 60m

# Repare na versão do kernel do worker node.
root@cks-master:~# k get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
cks-master Ready control-plane 12d v1.31.0 10.128.0.5 <none> Ubuntu 20.04.6 LTS 5.15.0-1067-gcp containerd://1.6.12
cks-worker Ready <none> 12d v1.31.0 10.128.0.4 <none> Ubuntu 20.04.6 LTS 5.15.0-1067-gcp containerd://1.6.12


# Vamos entrar no pod e fazer um syscall
root@cks-master:~# k exec pod -it -- bash
root@pod:/# uname -r
4.4.0 # Diferente do host

# Com o comando dmesg podemos observar que o gVisor está funcionando. dmesg exibe o buffer de mensagens do kernel (do kernel emulado pelo gVisor, neste caso).
root@pod:/# dmesg
[ 0.000000] Starting gVisor...
[ 0.549611] Letting the watchdogs out...
[ 0.723449] Preparing for the zombie uprising...
[ 1.155120] Searching for needles in stacks...
[ 1.300624] Singleplexing /dev/ptmx...
[ 1.588387] Verifying that no non-zero bytes made their way into /dev/zero...
[ 2.048352] Checking naughty and nice process list...
[ 2.338411] Adversarially training Redcode AI...
[ 2.439613] Synthesizing system calls...
[ 2.870611] Granting licence to kill(2)...
[ 3.330405] Accelerating teletypewriter to 9600 baud...
[ 3.336746] Setting up VFS...
[ 3.554635] Setting up FUSE...
[ 3.613755] Ready!

# Rodando outro pod sem o gvisor

root@cks-master:~# k run nginx --image=nginx
pod/nginx created

root@cks-master:~# k exec -it nginx -- bash
root@nginx:/# dmesg
dmesg: read kernel buffer failed: Operation not permitted

root@nginx:/# uname -r
5.15.0-1067-gcp