v2
cp /bin/bash /jail/bash
mount -o bind /home /jail/home
$ unshare --fork --pid --mount-proc bash
$ ps auxf
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 8964 3924 pts/4 S 00:25 0:00 bash
root 8 0.0 0.0 10612 3304 pts/4 R+ 00:25 0:00 ps auxf
😀
$ docker run -ti --rm ubuntu bash
root@03f784316baa:/# ps auxf
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.2 0.0 4112 3436 pts/0 Ss 21:40 0:00 bash
root 8 0.0 0.0 5884 2948 pts/0 R+ 21:40 0:00 ps auxf
😀
Any Docker container is just a process
FROM ubuntu
RUN apt update && apt install nginx
CMD ["nginx", "-g", "daemon off;"]
$ docker build -t my-nginx .
$ docker run -p 80:80 my-nginx
$ ps axf -o user,pid,command
root 19616 /usr/bin/containerd
root 21721 \_ containerd-shim -namespace moby -workdir /var/lib/containerd/io.containerd.runtime.v1.linux/moby/35a2b88eb0659e452be831115395352359e35669efa766931d350296aa154c6d -address /run/containerd/containerd.sock -containerd-binary /usr/bin/containerd -runtime-root /var/run/docker/runtime-runc
root 21745 \_ nginx: master process nginx -g daemon off;
nobody 21788 \_ nginx: worker process
nobody 21789 \_ nginx: worker process
root 23571 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
/var/run/docker.sock
dockerd
pwn, it runs as root!
--privileged
--cap-add=SYS_ADMIN
--security-opt seccomp=unconfined
You need to run and scale 1000 containers
They all should be talking to each other via network
They should use network storage to run database on it
They have a load balancer
They should dynamically adjust nodes based on load
Kubernetes (K8s) is an open-source system for automating deployment, scaling, and management of containerized applications.
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
kube-a Ready master 13d v1.18.6 10.166.0.8 <none> Ubuntu 16.04.7 LTS 4.15.0-1080-gcp docker://18.9.7
kube-b Ready <none> 13d v1.18.6 10.166.0.7 <none> Ubuntu 16.04.7 LTS 4.15.0-1080-gcp docker://18.9.7
apiVersion: v1
kind: Pod
metadata:
name: nginx
namespace: default
spec:
containers:
- name: nginx
image: nginx
$ kubectl apply -f nginx.yaml
pod/nginx created
kubeadm
Example:
etcd on kubeadm < 1.16 accepts TLS certificates issued by apiserver CA (certs used for auth)
Exploitation: create a pod with hostNetwork: true
apiVersion: v1
kind: Pod
metadata:
name: nginx
namespace: default
spec:
hostNetwork: true
containers:
- name: nginx
image: nginx
Namespaces only affect resources in RBAC!
e.g. namespaces do not provide network isolation
apiVersion: v1
kind: ServiceAccount
metadata:
name: remover
Allows workloads to use the Kubernetes API
root@nginx:/# ls /var/run/secrets/kubernetes.io/serviceaccount/
ca.crt namespace token
root@nginx:/var/run/secrets/kubernetes.io/serviceaccount# cat token
eyJhbGc...UNg
apiVersion: v1
kind: ServiceAccount
metadata:
name: remover
automountServiceAccountToken: false
...
ServiceAccount <---> RoleBinding <---> Role
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: remover
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: remover
subjects:
- kind: ServiceAccount
name: remover
roleRef:
kind: Role
name: remover
apiGroup: rbac.authorization.k8s.io
$ kubectl auth can-i --list
Resources Non-Resource URLs Resource Names Verbs
*.* [] [] [*]
[*] [] [*]
[/api/*] [] [get]
[/api] [] [get]
[/apis/*] [] [get]
[/apis] [] [get]
Redhat 2021 Kubernetes security report
hostPath
- mounts directory from node
privileged
HostNetwork
allowPrivilegeEscalation
Hvatit eto terpet'! (Enough of tolerating this!)
Enables us to set restrictive policy on security context
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: restrictive
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
hostNetwork: false
hostPID: false
hostIPC: false
runAsUser:
rule: RunAsAny
fsGroup:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'downwardAPI'
- 'emptyDir'
- 'persistentVolumeClaim'
- 'secret'
- 'projected'
allowedCapabilities:
- 'NET_RAW'
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: restrictive-psp
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- "restrictive"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: restrictive-psp
subjects:
- kind: ServiceAccount
name: remover
roleRef:
kind: ClusterRole
name: restrictive-psp
apiGroup: rbac.authorization.k8s.io
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-egress
namespace: app
spec:
podSelector:
matchLabels: {}
policyTypes:
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns-access
namespace: app
spec:
podSelector:
matchLabels: {}
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
name: kube-system
ports:
- protocol: UDP
port: 53
- protocol: TCP
port: 53
Network plugin support required.
You won't get a warning if it's not working!
$ curl "http://metadata.google.internal/computeMetadata/v1beta1/instance/service-accounts/default/token"
{"access_token":"ya29.c.Ko...QtkM","expires_in":2837,"token_type":"Bearer"}
$ curl "https://cloudresourcemanager.googleapis.com/v1/projects?alt=json" -H "Authorization: Bearer ya29..."
...
$ curl -H "Authorization: Bearer ya29..." "http://kube-apiserver:6443/api/v1"
{...}
*name*.*namespace*.svc.cluster.local
curl -k https://kubernetes-dashboard.kubernetes-dashboard.svc.cluster.local:443/api/v1/
Alt +