# 云原生 K8s 安全专家 CKS 认证考题详解

# 1、K8s 集群安全加固:禁止匿名访问

pEgL5Uf.png

#1. 更改授权模式和添加 NodeRestriction 准入控制器
root@master01:~# cat /etc/kubernetes/manifests/kube-apiserver.yaml 
...
    - --enable-admission-plugins=NodeRestriction
    - --authorization-mode=Node,RBAC
...
#2. 重启 kubelet 使 kube-apiserver 静态 Pod 重建,然后删除匿名访问的 clusterrolebinding
root@master01:~# systemctl restart kubelet
root@master01:~# kubectl delete clusterrolebinding system:anonymous --kubeconfig=/etc/kubernetes/admin.conf

# 2、K8s 基准测试考题分析

pEKU1gA.png

pEKUGut.png

#切换集群,登录 master 节点
#1. 修改 apiserver
root@master01:~# cat /etc/kubernetes/manifests/kube-apiserver.yaml 
...
    - --authorization-mode=Node,RBAC
...
#2. 修改 etcd
root@master01:~# cat /etc/kubernetes/manifests/etcd.yaml 
...
    - --client-cert-auth=true
...
#3. 修改 kubelet
root@master01:~# cat /var/lib/kubelet/config.yaml 
...
authentication:
  anonymous:
    enabled: false
...
authorization:
  mode: Webhook
...
#4. 重启 kubelet
root@master01:~# systemctl daemon-reload
root@master01:~# systemctl restart kubelet
#5. 登录 node 节点
root@node01:~# cat /var/lib/kubelet/config.yaml 
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
...
authorization:
  mode: Webhook
  ...
  
root@node01:~# systemctl daemon-reload
root@node01:~# systemctl restart kubelet

# 3、K8s 密文管理考题分析

pEKUL5D.png

root@master01:~# kubectl get deployment -n clever
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
clever   0/1     1            0           86d
root@master01:~# kubectl get pods -n clever
NAME                      READY   STATUS              RESTARTS   AGE
clever-6766f68d99-75xt8   0/1     ContainerCreating   0          86d
root@master01:~# kubectl create secret tls clever -n clever --cert ~/cert/tls.crt --key ~/cert/tls.key 
secret/clever created
root@master01:~# kubectl get secret -n clever
NAME     TYPE                DATA   AGE
clever   kubernetes.io/tls   2      2m31s
root@master01:~# kubectl get pods -n clever
NAME                      READY   STATUS              RESTARTS   AGE
clever-6766f68d99-75xt8   0/1     ContainerCreating   0          86d

# 4、K8s 资源优化:提升容器安全性

pEKBeYT.png

#1. 优化 Dockerfile
root@master01:~# cat /home/candidate/Dockerfile 
FROM ubuntu:16.04
USER root
RUN apt-get install -y nginx=4.2
ENV ENV=testing
USER 65535
CMD ["nginx -d"]
#2. 优化 Deployment
root@master01:~# cat /home/candidate/deployment.yaml 
...
        securityContext:
          capabilities:
            add: ["NET_ADMIN"]
            drop: ["all"]
          privileged: false
          readOnlyRootFilesystem: true
          runAsUser: 65535

# 5、Docker 安全问题考题分析

pElQ359.png

#1. 删除 develop 用户
root@master01:~# ssh node01
root@node01:~# id develop
uid=1001(develop) gid=1001(develop) groups=1001(develop),999(docker)
root@node01:~# gpasswd -d develop docker
root@node01:~# id develop
uid=1001(develop) gid=1001(develop) groups=1001(develop)
#2. 修改 docker 的配置文件
#2.1 查看 docker 配置文件路径
root@node01:~# systemctl status docker
...
     Loaded: loaded (/lib/systemd/system/docker.service; enabled; vendor preset: enabled)
     Active: active (running) since Sat 2025-02-22 06:04:52 UTC; 57min ago
...
root@node01:~# ss -lntp|grep 2375
LISTEN 0      4096                 *:2375             *:*    users:(("dockerd",pid=1336,fd=3))
...
#2.2 查看监听端口 2375
root@node01:~# ss -lntp|grep 2375
LISTEN 0      4096                 *:2375             *:*    users:(("dockerd",pid=1325,fd=3))  
#2.3 修改 docker 配置文件
root@node01:~# vim /lib/systemd/system/docker.service 
...
[Socket]
SocketUser=root
SocketGroup=root
ListenStream=/var/run/docker.sock
...
#ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2375 --containerd=/run/containerd/containerd.sock
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
...
#2.4 重启 docker
root@node01:~# systemctl daemon-reload
root@node01:~# systemctl restart docker
root@node01:~# ss -lntp|grep 2375

# 6、K8s Ingress SSL 考题分析

pEKDfPK.png

Concepts->Services, Load Balancing, and Networking->Ingress->tls

root@master01:~# kubectl get svc -n prod
NAME   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
web    ClusterIP   10.96.212.149   <none>        80/TCP    86d
root@master01:~# kubectl get ingressclass
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       86d
root@master01:~# vim ingress-web.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web
  namespace: prod
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
      - web.k8s.local
    secretName: web-cert
  rules:
  - host: web.k8s.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 80
root@master01:~# kubectl apply -f ingress-web.yaml 
ingress.networking.k8s.io/web created
root@master01:~# kubectl get ingress -n prod
NAME   CLASS   HOSTS           ADDRESS   PORTS     AGE
web    nginx   web.k8s.local             80, 443   7s
#创建后测试
root@master01:~# curl -Lk http://web.k8s.local
root@master01:~# curl -Lkv http://web.k8s.local

# 7、K8s ServiceAccount 考题解析

pEKsYt0.png

Tasks->Configure Pods and Containers->Configure Service Accounts for Pods->Launch a Pod using service account token projection

root@master01:~# kubectl edit sa statsmonitor-sa -n monitoring
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
  creationTimestamp: "2024-11-20T14:44:16Z"
  name: statsmonitor-sa
  namespace: monitoring
  resourceVersion: "104395"
  uid: 13ed4adf-cadf-49a1-a7f1-31a424f433a5
  
root@master01:~# cat ~/statsmonitor/deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: statsmonitor
  name: statsmonitor
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: statsmonitor
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: statsmonitor
    spec:
      volumes:
      - name: token
        projected:
          sources:
          - serviceAccountToken:
              path: token
      serviceAccountName: statsmonitor-sa
      containers:
      - image: m.daocloud.io/docker.io/library/nginx:latest
        name: nginx
        volumeMounts:
        - mountPath: /var/run/secrets/kubernetes.io/serviceaccount/token
          name: token
          readOnly: true  #只读挂载
root@master01:~# kubectl apply -f  ~/statsmonitor/deployment.yaml

# 8、K8s NetworkPolicy 考题解析

pEKhc4J.png

Concepts->Services, Load Balancing, and Networking->Network Policies

root@master01:~# vim /home/candidate/KSCS00101/network-policy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: defaultdeny
  namespace: production
spec:
  podSelector: {}
  policyTypes:
  - Ingress
 # - Egress
root@master01:~# kubectl apply -f /home/candidate/KSCS00101/network-policy.yaml

# 9、K8s Cilium 考题解析

pEllBLT.png

security->Overview of Network Policy->Layer 4 Examples

root@master01:~# kubectl get pods -n cilium-policy --show-labels
NAME                      READY   STATUS    RESTARTS     AGE   LABELS
policy-858469dc69-rrhbz   1/1     Running   4 (8d ago)   92d   app=policy,pod-template-hash=858469dc69
root@master01:~# vim cilium-policy.yaml
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
  name: allow-ingress-host
  namespace: cilium-policy
spec:
  endpointSelector:
    matchLabels:
      app: policy
  ingress:
  - fromEndpoints:
    - matchLabels:
        k8s:io.kubernetes.pod.namespace: ingress-nginx
    authentication:
      mode: "required"
  - fromEntities:
    - "host" 
root@master01:~# kubectl apply -f cilium-policy.yaml

# 10、K8s SecurityContext 考题解析

pEK5OnP.png

root@master01:~# vim ~/confidential/nginx-unprivileged.yaml 
...
        securityContext:
          allowPrivilegeEscalation: false
          runAsNonRoot: true
          capabilities:
            drop: ["ALL"]
          seccompProfile:
            type: RuntimeDefault
root@master01:~# kubectl apply -f ~/confidential/nginx-unprivileged.yaml

# 11、K8s SecurityContext 配置变更考题

pEKIShQ.png

root@master01:~# kubectl edit deploy lamp-deployment -n app
...
    spec:
      containers:
      - image: m.daocloud.io/docker.io/library/nginx:latest
        imagePullPolicy: Always
        name: nginx
        resources: {}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsUser: 30000
...

# 12、K8s 行为检测:Falco

pElYTTf.png

Concepts->rules->Basic of Falco Rules

#1. 创建 falco 规则文件
root@master01:~# vim /etc/falco/devmem_rule.yaml 
- rule: "monitor devmem"
  desc: "monitor devmem"
  condition: fd.name == '/dev/mem'
  output: "Container: container_id=%container.id reading /dev/mem"
  priority: NOTICE
  
#2. 执行扫描 (注意:需要查看题目是否声明了在哪个节点进行查询,如果没有指明节点,需要在所有的节点执行扫描)
root@node01:~# falco -M 60 -r /etc/falco/devmem_rule.yaml
...
21:44:14.368662183: Notice Container: container_id=f459ad4e0661 reading /dev/mem
...
#3. 使用 crictl 查询是哪个 Pod
root@node01:~# crictl ps |grep f459ad4e0661
WARN[0000] runtime connect using default endpoints: [unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead. 
WARN[0000] image connect using default endpoints: [unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead. 
f459ad4e06615       05455a08881ea       10 minutes ago      Running             alpine              0                   7ff2ea1a19ab9       cpu-677fcb7db7-nj6ll
#4. 回到控制节点(退出 SSH)并查看该 Pod 处于的空间:
root@master01:~# kubectl get pods -A|grep cpu-677fcb7db7-nj6ll
default         cpu-677fcb7db7-nj6ll                 1/1     Running            0                 48m
#5. 将该 Pod 的 deployment 副本设置为 0
root@master01:~# kubectl scale deploy cpu --replicas=0 -n default

# 13、K8s 合规性扫描 Bom

pEMlQzV.png

#1. 查看 pod
root@master01:~# kubectl get pods -n bom
NAME                   READY   STATUS    RESTARTS      AGE
bom-6d7c56bd86-m4jmn   3/3     Running   3 (11m ago)   23h
#2. 可以看到是容器 alpine3 包含了 libcrypto3-3.0.15-r0
root@master01:~# kubectl exec -it bom-6d7c56bd86-m4jmn -n bom -- sh
Defaulted container "alpine1" out of: alpine1, alpine2, alpine3
/ # apk list |grep libcrypto3
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.20/main: No such file or directory
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.20/community: No such file or directory
libcrypto3-3.3.2-r0 x86_64 {openssl} (Apache-2.0) [installed]
root@master01:~# kubectl exec -it bom-6d7c56bd86-m4jmn -c alpine2 -n bom -- sh
/ # apk list |grep libcrypto3
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.19/main: No such file or directory
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.19/community: No such file or directory
libcrypto3-3.1.7-r0 x86_64 {openssl} (Apache-2.0) [installed]
root@master01:~# kubectl exec -it bom-6d7c56bd86-m4jmn -c alpine3 -n bom -- sh
/ # apk list|grep libcrypto3
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.17/main: No such file or directory
WARNING: opening from cache https://dl-cdn.alpinelinux.org/alpine/v3.17/community: No such file or directory
libcrypto3-3.0.15-r0 x86_64 {openssl} (Apache-2.0) [installed]
#3. 查看容器 alpine3 对应的镜像
root@master01:~# kubectl edit deploy bom -n bom
...
      - command:
        - sleep
        - "360000"
        image: registry.cn-beijing.aliyuncs.com/dotbalo/alpine:3.17.10
        imagePullPolicy: IfNotPresent
...
#4. 使用 bom 为该镜像生成 SPDX
root@master01:~# mkdir ~/KSRS29J15
root@master01:~# bom generate -i registry.cn-beijing.aliyuncs.com/dotbalo/alpine:3.17.10 > ~/KSRS29J15/bom.spdx
#5. 修改~/bom-deployment.yaml 文件,删 alpine3 容器相关的配置
root@master01:~# vim ~/bom-deployment.yaml
...
name: alpine3
...
root@master01:~# kubectl replace -f  ~/bom-deployment.yaml

# 14、K8s 审计日志概念理解

pElcBdg.png

Tasks->Monitoring, Logging, and Debugging->Troubleshooting Clusters->Auditing

#1. 创建审计日志规则
root@master01:~# mkdir /etc/kubernetes/logpolicy  
root@master01:~# mkdir /var/log/kubernetes
root@master01:~# cat /etc/kubernetes/logpolicy/sample-policy.yaml
apiVersion: audit.k8s.io/v1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log cronjob changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: "batch"
      # Resource "cronjobs" doesn't match requests to any subresource of
      # cronjobs, which is consistent with the RBAC policy.
      resources: ["cronjobs"]
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["persistentvolumes"]
    # This rule only applies to resources in the "front-apps" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["front-apps"]
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]
  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
#2. 应用审计日志规则
root@master01:~# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
    - --audit-policy-file=/etc/kubernetes/logpolicy/sample-policy.yaml
    - --audit-log-path=/var/log/kubernetes/kubernetes-logs.txt
    - --audit-log-maxage=30
    - --audit-log-maxbackup=10
...
    volumeMounts:
    - mountPath: /etc/kubernetes/logpolicy/sample-policy.yaml
      name: audit
      readOnly: true
    - mountPath: /var/log/kubernetes/
      name: audit-log
      readOnly: false
...
  volumes:
...
  - name: audit
    hostPath:
      path: /etc/kubernetes/logpolicy/sample-policy.yaml
      type: File
  - name: audit-log
    hostPath:
      path: /var/log/kubernetes/
      type: DirectoryOrCreate
...
#3. 重启 kubelet
root@master01:~# systemctl daemon-reload
root@master01:~# systemctl restart kubelet
#4. 查看日志
root@master01:~# tail -f /var/log/kubernetes/kubernetes-logs.txt

# 15、K8s ImagePolicyWebhook

pEM36PA.png

#1. 切换 Context 后,ssh 到对应 master 节点
root@master01:~# ssh master01
#2. 关闭默认允许
root@master01:~# vim /etc/kubernetes/epconfig/admission_configuration.json 
{
  "imagePolicy": {
     "kubeConfigFile": "/etc/kubernetes/epconfig/kubeconfig.yaml",
     "allowTTL": 50,
     "denyTTL": 50,
     "retryBackoff": 500,
     "defaultAllow": false
  }
}
#注意:JSON 文件不支持注释,只需将 defaultAllow 的值由 true 改为 false,不要在文件中保留注释
#3. 配置 Webhook 地址
root@master01:~# vim /etc/kubernetes/epconfig/kubeconfig.yaml 
...
- cluster:
    certificate-authority: /etc/kubernetes/pki/server.crt
    server: https://wakanda.local:8082/image_policy  
...
#4. 开启 ImagePolicyWebhook
root@master01:~# cat /etc/kubernetes/manifests/kube-apiserver.yaml 
...
    - --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook
    - --admission-control-config-file=/etc/kubernetes/epconfig/admission_configuration.json
...
    volumeMounts:
    - mountPath: /etc/kubernetes/epconfig
      name: epconfig
      readOnly: true
...
  volumes:
  - hostPath:
      path: /etc/kubernetes/epconfig
    name: epconfig
...
#5. 重启服务
root@master01:~# systemctl daemon-reload
root@master01:~# systemctl restart kubelet
#6. 测试
root@master01:~# kubectl apply -f /root/KSSC00202/configuration-test.yml
root@master01:~# kubectl describe rc nginx-latest
...
  Warning  FailedCreate  19s (x4 over 37s)  replication-controller  (combined from similar events): Error creating: pods "nginx-latest-k69tx" is forbidden: image policy webhook backend denied one or more images: Images using latest tag are not allowed

# 16、K8s 集群升级考题

pEM8JeS.png

Tasks->Administer a Cluster->Administer with kubeadm->Upgrade kubeadm Cluster->Upgrading Linux nodes

#1. 切换 context
root@master01:~# kubectl config use-context k8s008
#2. 查看集群会发现 master 和 node 版本不一样
root@master01:~# kubectl get nodes
NAME       STATUS   ROLES           AGE   VERSION
master01   Ready    control-plane   89d   v1.31.2
node01     Ready    <none>          89d   v1.31.1
#3. 切换 node 节点
root@master01:~# ssh node01
sudo apt-mark unhold kubeadm && \
sudo apt-get update && sudo apt-get install -y kubeadm='1.31.2-*' && \
sudo apt-mark hold kubeadm
root@node01:~# sudo kubeadm upgrade node
#4. 切换 master 节点或控制端,驱逐 Pod
root@master01:~# kubectl drain node01 --ignore-daemonsets  
#5.node01 节点变为禁止调度状态
root@master01:~# kubectl get nodes
NAME       STATUS                     ROLES           AGE   VERSION
master01   Ready                      control-plane   90d   v1.31.2
node01     Ready,SchedulingDisabled   <none>          90d   v1.31.1
#6. 切换 node 节点
root@node01:~# sudo apt-mark unhold kubelet kubectl && \
sudo apt-get update && sudo apt-get install -y kubelet='1.31.2-*' kubectl='1.31.2-*' && \
sudo apt-mark hold kubelet kubectl
root@node01:~# sudo systemctl daemon-reload
root@node01:~# sudo systemctl restart kubelet
#7. 切换 master 节点
root@master01:~# kubectl uncordon node01
root@master01:~# kubectl get nodes
NAME       STATUS   ROLES           AGE   VERSION
master01   Ready    control-plane   90d   v1.31.2
node01     Ready    <none>          90d   v1.31.2
此文章已被阅读次数:正在加载...更新于

请我喝[茶]~( ̄▽ ̄)~*

Xu Yong 微信支付

微信支付

Xu Yong 支付宝

支付宝