# K8s Admission Control: ResourceQuota, LimitRange, and QoS (Quality of Service)

# 1. ResourceQuota Configuration Explained

A ResourceQuota enforces resource quotas at the namespace level, preventing resources from being created without bound. CPU and memory are limited based on the total of the resources values configured on Pods; Pods that do not configure the resources parameter cannot be limited this way.

apiVersion: v1
kind: ResourceQuota
metadata:
  name: resourcequota-test
  namespace: test
  labels:
    app: resourcequota
spec:
  hard:
    pods: 3
    requests.cpu: 3
    requests.memory: 512Mi
    limits.cpu: 8
    limits.memory: 16Gi
    configmaps: 20
    requests.storage: 40Gi
    persistentvolumeclaims: 20
    replicationcontrollers: 20
    secrets: 20
    services: 50
    services.loadbalancers: "2"
    services.nodeports: "10"
  • pods: the maximum number of Pods that can be created
  • requests.cpu: the maximum total CPU requests
  • requests.memory: the maximum total memory requests
  • limits.cpu: the maximum total CPU limits
  • limits.memory: the maximum total memory limits
  • configmaps: the maximum number of ConfigMaps
  • requests.storage: the maximum total storage requested across PersistentVolumeClaims
  • persistentvolumeclaims: the maximum number of PersistentVolumeClaims
  • replicationcontrollers: the maximum number of ReplicationControllers
  • secrets: the maximum number of Secrets
  • services: the maximum number of Services
  • services.nodeports: the maximum number of NodePort-type Services
  • services.loadbalancers: the maximum number of LoadBalancer-type Services
# 1.1 ResourceQuota Configuration Example
#1. Limit the test namespace to 3 Pods and 2 ConfigMaps
[root@k8s-master01 resourcequota]# cat rq-test.yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: resourcequota-test
  namespace: test
  labels:
    app: resourcequota
spec:
  hard:
    pods: 3
#    requests.cpu: 3
#    requests.memory: 512Mi
#    limits.cpu: 8
#    limits.memory: 16Gi
    configmaps: 2
#    requests.storage: 40Gi
#    persistentvolumeclaims: 20
#    replicationcontrollers: 20
#    secrets: 20
#    services: 50
#    services.loadbalancers: "2"
#    services.nodeports: "10"

#2. The test namespace already counts 1 ConfigMap (the auto-created kube-root-ca.crt), against a limit of 2
[root@k8s-master01 resourcequota]# kubectl get resourcequota -n test
NAME                 AGE   REQUEST                      LIMIT
resourcequota-test   61s   configmaps: 1/2, pods: 0/3  

#3. Creating the 2nd ConfigMap in the test namespace succeeds; creating the 3rd fails
[root@k8s-master01 resourcequota]# kubectl create cm rq-cm1 -n test --from-literal=key1=value1
[root@k8s-master01 resourcequota]# kubectl create cm rq-cm2 -n test --from-literal=key2=value2
error: failed to create configmap: configmaps "rq-cm2" is forbidden: exceeded quota: resourcequota-test, requested: configmaps=1, used: configmaps=2, limited: configmaps=2
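
As a quick check, describing the ResourceQuota shows current consumption against each hard limit; the Used column explains why the third ConfigMap was rejected (output below is illustrative of the state at this point):

[root@k8s-master01 resourcequota]# kubectl describe resourcequota resourcequota-test -n test
Name:       resourcequota-test
Namespace:  test
Resource    Used  Hard
--------    ----  ----
configmaps  2     2
pods        0     3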

# 2. LimitRange Configuration Explained

Although a ResourceQuota can cap a namespace's total CPU and memory, it cannot constrain Pods that omit the resources parameter. A LimitRange fills that gap: when a Pod is created without resources, CPU and memory settings are injected automatically from the LimitRange defaults, and the LimitRange can also enforce maximum and minimum bounds on resources. Unlike ResourceQuota, a LimitRange applies per Pod (per container).

apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-mem-limit-range
  namespace: test
spec:
  limits:
  - default:         # default CPU/memory limits
      cpu: 1
      memory: 512Mi
    defaultRequest:  # default CPU/memory requests
      cpu: 0.5
      memory: 256Mi
    max:             # maximum CPU/memory allowed
      cpu: "4000m"
      memory: 4Gi
    min:             # minimum CPU/memory allowed
      cpu: "100m"
      memory: 100Mi
    type: Container
  - type: PersistentVolumeClaim    # limit PVC size
    max:
      storage: 2Gi
    min:
      storage: 1Gi
  • default: the default limits applied to containers that omit them
  • defaultRequest: the default requests applied to containers that omit them
  • max: the maximum requests/limits a container may declare
  • min: the minimum requests/limits a container may declare
# 2.1 Configuring Default requests and limits

When a Pod is created without the resources parameter, CPU and memory settings are added to it automatically from the LimitRange configuration.

#1. Create the LimitRange
[root@k8s-master01 resourcequota]# cat limitrange.yaml 
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-mem-limit-range
  namespace: test
spec:
  limits:
  - default:         # default CPU/memory limits
      cpu: 1
      memory: 512Mi
    defaultRequest:  # default CPU/memory requests
      cpu: 0.5
      memory: 256Mi
    max:             # maximum CPU/memory allowed
      cpu: "4000m"
      memory: 4Gi
    min:             # minimum CPU/memory allowed
      cpu: "100m"
      memory: 100Mi
    type: Container
  - type: PersistentVolumeClaim    # limit PVC size
    max:
      storage: 2Gi
    min:
      storage: 1Gi  
      
[root@k8s-master01 resourcequota]# kubectl apply -f limitrange.yaml
[root@k8s-master01 resourcequota]# kubectl get limitrange -n test
NAME                  CREATED AT
cpu-mem-limit-range   2025-04-23T07:55:03Z
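
Describing the LimitRange presents the defaults and bounds as one table (output below is illustrative):

[root@k8s-master01 resourcequota]# kubectl describe limitrange cpu-mem-limit-range -n test
Name:       cpu-mem-limit-range
Namespace:  test
Type                   Resource  Min    Max  Default Request  Default Limit  Max Limit/Request Ratio
----                   --------  ---    ---  ---------------  -------------  -----------------------
Container              cpu       100m   4    500m             1              -
Container              memory    100Mi  4Gi  256Mi            512Mi          -
PersistentVolumeClaim  storage   1Gi    2Gi  -                -              -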

#2. Create a Deployment and check whether CPU/memory settings are added automatically from the LimitRange
[root@k8s-master01 resourcequota]# cat deploy-limitrange.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-limirange
  labels:
    app: deploy-limirange
  namespace: test
spec:
  selector:
    matchLabels:
      app: deploy-limirange
  replicas: 1
  template:
    metadata:
      labels:
        app: deploy-limirange
    spec:
      restartPolicy: Always
      containers:
        - name: deploy-limirange
          image: nginx
          imagePullPolicy: IfNotPresent

[root@k8s-master01 resourcequota]# kubectl get pod -n test
NAME                                READY   STATUS    RESTARTS   AGE
deploy-limirange-854c9545ff-grpxr   1/1     Running   0          39s
[root@k8s-master01 resourcequota]# kubectl get pod -n test -oyaml
...
  spec:
    containers:
    - image: nginx
      imagePullPolicy: IfNotPresent
      name: deploy-limirange
      resources:
        limits:
          cpu: "1"
          memory: 512Mi
        requests:
          cpu: 500m
          memory: 256Mi
...
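
Note that because the injected requests (500m / 256Mi) are lower than the limits (1 / 512Mi), this Pod lands in the Burstable QoS class covered in section 3. You can confirm with a jsonpath query against the Pod's status:

[root@k8s-master01 resourcequota]# kubectl get pod deploy-limirange-854c9545ff-grpxr -n test -o jsonpath='{.status.qosClass}'
Burstable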
# 2.2 Restricting the Range of requests and limits
#1. Reuse the limitrange.yaml created in section 2.1 (Container max: cpu 4000m / memory 4Gi, min: cpu 100m / memory 100Mi)

#2. Create a Deployment whose CPU/memory limits are above the LimitRange max and whose requests are below the min
[root@k8s-master01 resourcequota]# cat deploy-limitrange.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-limirange
  labels:
    app: deploy-limirange
  namespace: test
spec:
  selector:
    matchLabels:
      app: deploy-limirange
  replicas: 1
  template:
    metadata:
      labels:
        app: deploy-limirange
    spec:
      restartPolicy: Always
      containers:
        - name: deploy-limirange
          image: nginx
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              memory: 8096Mi
              cpu: 5
            requests:
              memory: 64Mi
              cpu: 10m

#3. Because the Deployment's CPU/memory limits and requests fall outside the LimitRange max/min bounds, no Pod is created
[root@k8s-master01 resourcequota]# kubectl create -f deploy-limitrange.yaml 

[root@k8s-master01 resourcequota]# kubectl get deploy deploy-limirange -n test
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
deploy-limirange   0/1     0            0           2m7s
[root@k8s-master01 resourcequota]# kubectl get pods -n test

[root@k8s-master01 resourcequota]# kubectl describe rs deploy-limirange-54c5d69b4b -n test
Name:           deploy-limirange-54c5d69b4b
Namespace:      test
Selector:       app=deploy-limirange,pod-template-hash=54c5d69b4b
Labels:         app=deploy-limirange
                pod-template-hash=54c5d69b4b
Annotations:    deployment.kubernetes.io/desired-replicas: 1
                deployment.kubernetes.io/max-replicas: 2
                deployment.kubernetes.io/revision: 1
Controlled By:  Deployment/deploy-limirange
Replicas:       0 current / 1 desired
Pods Status:    0 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=deploy-limirange
           pod-template-hash=54c5d69b4b
  Containers:
   deploy-limirange:
    Image:      nginx
    Port:       <none>
    Host Port:  <none>
    Limits:
      cpu:     5
      memory:  8096Mi
    Requests:
      cpu:         10m
      memory:      64Mi
    Environment:   <none>
    Mounts:        <none>
  Volumes:         <none>
  Node-Selectors:  <none>
  Tolerations:     <none>
Conditions:
  Type             Status  Reason
  ----             ------  ------
  ReplicaFailure   True    FailedCreate
Events:
  Type     Reason        Age                 From                   Message
  ----     ------        ----                ----                   -------
  Warning  FailedCreate  3m8s                replicaset-controller  Error creating: pods "deploy-limirange-54c5d69b4b-zxhzk" is forbidden: [minimum cpu usage per Container is 100m, but request is 10m, minimum memory usage per Container is 100Mi, but request is 64Mi, maximum cpu usage per Container is 4, but limit is 5, maximum memory usage per Container is 4Gi, but limit is 8096Mi]
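
To let the ReplicaSet create its Pod, every value must fall inside the LimitRange bounds (cpu between 100m and 4, memory between 100Mi and 4Gi). A minimal sketch of a compliant resources block for deploy-limitrange.yaml (these particular values are just one valid choice):

          resources:
            limits:
              memory: 2Gi        # <= LimitRange max 4Gi
              cpu: 2             # <= LimitRange max 4000m
            requests:
              memory: 256Mi      # >= LimitRange min 100Mi
              cpu: 200m          # >= LimitRange min 100m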
# 2.3 Limiting Storage Size
#1. Reuse the limitrange.yaml created in section 2.1 (PersistentVolumeClaim max: storage 2Gi, min: storage 1Gi)
  
#2. The PVC requests more than 2Gi, so creation fails
[root@k8s-master01 ~]# cat pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sc-pvc-001
spec:
  storageClassName: "nfs-storage"     # explicitly specify which StorageClass provisioner creates the PV
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 3Gi                    # request storage based on actual business needs
[root@k8s-master01 ~]# kubectl create -f pvc.yaml -n test
Error from server (Forbidden): error when creating "pvc.yaml": persistentvolumeclaims "sc-pvc-001" is forbidden: maximum storage usage per PersistentVolumeClaim is 2Gi, but request is 3Gi
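
Conversely, a PVC whose request sits within the allowed window is admitted. A minimal sketch with the request lowered to 2Gi:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sc-pvc-001
spec:
  storageClassName: "nfs-storage"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi      # within the LimitRange bounds (min 1Gi, max 2Gi)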

# 3. Quality of Service (QoS)

  • Guaranteed: the highest QoS class. When a node runs out of memory, Pods with BestEffort and Burstable QoS are killed first; only if memory is still insufficient are Guaranteed Pods killed. A Pod in this class has a clearly defined resource footprint: its requests for cpu and memory equal its limits for cpu and memory.
  • Burstable: lower than Guaranteed. When a node runs out of memory, BestEffort Pods are killed first; if memory is still insufficient, Burstable Pods are killed next, protecting Guaranteed Pods. A Pod in this class has a known minimum resource footprint but can consume more when the node has spare capacity: its limits for cpu and memory are greater than its requests.
  • BestEffort: best effort. When a node runs out of memory, these Pods are killed first, to keep Burstable and Guaranteed Pods running.
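
The class assigned to each Pod is recorded in its status.qosClass field. As a quick sketch, you can list Pods together with their class via custom columns (output illustrative):

[root@k8s-master01 resourcequota]# kubectl get pods -n test -o custom-columns=NAME:.metadata.name,QOS:.status.qosClass
NAME                                QOS
deploy-limirange-854c9545ff-grpxr   Burstable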
# 3.1 Creating a Pod with Guaranteed QoS
  1. Every container in the Pod must specify limits.memory and requests.memory, and the two must be equal;

  2. Every container in the Pod must specify limits.cpu and requests.cpu, and the two must be equal.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx-deploy
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-deploy
    spec:
      restartPolicy: Always
      containers:
        - name: nginx-deploy
          image: nginx
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              memory: 1024Mi
              cpu: 1
            requests:
              memory: 1024Mi
              cpu: 1
# 3.2 Creating a Pod with Burstable QoS
  1. The Pod does not meet the criteria for Guaranteed;

  2. At least one container in the Pod configures requests.cpu or requests.memory.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx-deploy
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-deploy
    spec:
      restartPolicy: Always
      containers:
        - name: nginx-deploy
          image: nginx
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              memory: 1024Mi
              cpu: 1
            requests:
              memory: 128Mi
              cpu: 100m
# 3.3 Creating a Pod with BestEffort QoS
  1. Do not set the resources parameter on any container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx-deploy
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-deploy
    spec:
      restartPolicy: Always
      containers:
        - name: nginx-deploy
          image: nginx
          imagePullPolicy: IfNotPresent

This article is based on: https://edu.51cto.com/course/23845.html
