# Installing a Highly Available K8s Cluster with Kubeadm

# 1. Basic Configuration

# 1.1 Basic Environment Configuration
| Hostname | IP Address | Notes |
| --- | --- | --- |
| k8s-master01 ~ 03 | 192.168.1.71 ~ 73 | master nodes × 3 |
| / | 192.168.1.70 | keepalived virtual IP (does not occupy a machine) |
| k8s-node01 ~ 02 | 192.168.1.74 / 75 | worker nodes × 2 |

Replace these subnets consistently for your environment; the Pod subnet, Service subnet, and host subnet must not overlap!

| Configuration | Notes |
| --- | --- |
| OS version | Rocky Linux 8/9 |
| Containerd | latest |
| Pod subnet | 172.16.0.0/16 |
| Service subnet | 10.96.0.0/16 |

Change the hostname on all nodes (adjust for each node as needed):

hostnamectl set-hostname k8s-master01 

Configure hosts on all nodes by modifying /etc/hosts as follows:

[root@k8s-master01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.71 k8s-master01
192.168.1.72 k8s-master02
192.168.1.73 k8s-master03
192.168.1.74 k8s-node01
192.168.1.75 k8s-node02

Configure the yum repositories on all nodes:

# Configure the base repositories
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
    -i.bak \
    /etc/yum.repos.d/*.repo

yum makecache

Install the required tools on all nodes:

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git rsyslog -y

On all nodes, disable the firewall, SELinux, and dnsmasq, and enable rsyslog:

systemctl disable --now firewalld 
systemctl disable --now dnsmasq
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
systemctl enable --now rsyslog

Disable the swap partition on all nodes:

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

Install ntpdate on all nodes (the ntpsec package from EPEL provides it):

sudo dnf install epel-release -y
sudo dnf config-manager --set-enabled epel
sudo dnf install ntpsec -y

Synchronize time on all nodes and set the Shanghai time zone:

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
# Add to crontab
crontab -e
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com

Configure limits on all nodes:

ulimit -SHn 65535
vim /etc/security/limits.conf
# Append the following at the end
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

Upgrade the system on all nodes:

yum update -y

Set up passwordless SSH from the Master01 node to the other nodes. The configuration files and certificates generated during installation are all created on Master01, and cluster management is also performed from Master01:

ssh-keygen -t rsa
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
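
To confirm that passwordless login works, a quick check (the node list matches the host table above):

for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
    ssh $i hostname   # each hostname should print without a password prompt
done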

Note: in a public-cloud environment, you may need to place kubectl on a non-Master node.

Download all the installation source files on the Master01 node:

cd /root/ ; git clone https://gitee.com/chinagei/k8s-ha-install

# 1.2 Kernel Configuration

Install ipvsadm on all nodes:

yum install ipvsadm ipset sysstat conntrack libseccomp -y

Load the ipvs modules on all nodes:

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

Create ipvs.conf on all nodes and configure the modules to load at boot:

vim /etc/modules-load.d/ipvs.conf 
# Add the following content
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

Then enable the systemd module-load service on all nodes (errors reported here can be ignored):

systemctl enable --now systemd-modules-load.service

Apply kernel optimizations on all nodes:

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

Apply the configuration on all nodes:

sysctl --system

After the kernel configuration is complete, reboot all nodes, then verify that the modules load automatically:

reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack

# 2. Installing the High Availability Components

Note: if you are not installing a highly available cluster, haproxy and keepalived do not need to be installed.

Note: on public clouds, use the provider's own load balancer, such as Alibaba Cloud SLB/NLB or Tencent Cloud ELB, in place of haproxy and keepalived, because most public clouds do not support keepalived.

Install HAProxy and KeepAlived via yum on all Master nodes:

yum install keepalived haproxy -y

Configure HAProxy on all Master nodes, paying attention to the IP addresses marked in the comments:

[root@k8s-master01 etc]# mkdir /etc/haproxy
[root@k8s-master01 etc]# vim /etc/haproxy/haproxy.cfg 
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443       # HAProxy listen port
  bind 127.0.0.1:16443     # HAProxy listen port
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01    192.168.1.71:6443  check       # API server IP address
  server k8s-master02    192.168.1.72:6443  check       # API server IP address
  server k8s-master03    192.168.1.73:6443  check       # API server IP address

Configure KeepAlived on all Master nodes, paying attention to the values marked in the comments (they differ per node).

The Master01 node's configuration:

[root@k8s-master01 etc]# mkdir /etc/keepalived

[root@k8s-master01 ~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160               # interface name
    mcast_src_ip 192.168.1.71      # k8s-master01 IP address
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.70        # VIP address
    }
    track_script {
       chk_apiserver
    }
}

The Master02 node's configuration:

# vim /etc/keepalived/keepalived.conf 

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160                # interface name
    mcast_src_ip 192.168.1.72       # k8s-master02 IP address
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.70              # VIP address
    }
    track_script {
       chk_apiserver
    }
}

The Master03 node's configuration:

# vim /etc/keepalived/keepalived.conf 

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160                 # interface name
    mcast_src_ip 192.168.1.73        # k8s-master03 IP address
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.70          # VIP address
    }
    track_script {
       chk_apiserver
    }
}

Configure the KeepAlived health-check script on all master nodes:

[root@k8s-master01 keepalived]# vim /etc/keepalived/check_apiserver.sh 
#!/bin/bash
# If haproxy is not running after three checks, stop keepalived
# so that the VIP fails over to another master node.

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

Add execute permission to the health-check script on all master nodes:

chmod +x /etc/keepalived/check_apiserver.sh

Start haproxy and keepalived on all master nodes:

[root@k8s-master01 keepalived]# systemctl daemon-reload
[root@k8s-master01 keepalived]# systemctl enable --now haproxy
[root@k8s-master01 keepalived]# systemctl enable --now keepalived

Important: if keepalived and haproxy are installed, you must verify that keepalived works correctly.

Test the VIP from all nodes:
[root@k8s-master01 ~]# ping 192.168.1.70 -c 4
PING 192.168.1.70 (192.168.1.70) 56(84) bytes of data.
64 bytes from 192.168.1.70: icmp_seq=1 ttl=64 time=0.464 ms
64 bytes from 192.168.1.70: icmp_seq=2 ttl=64 time=0.063 ms
64 bytes from 192.168.1.70: icmp_seq=3 ttl=64 time=0.062 ms
64 bytes from 192.168.1.70: icmp_seq=4 ttl=64 time=0.063 ms

[root@k8s-master01 ~]# telnet 192.168.1.70 16443
Trying 192.168.1.70...
Connected to 192.168.1.70.
Escape character is '^]'.
Connection closed by foreign host.

If the VIP does not answer ping, or telnet does not show the ']' escape character, the VIP is not usable. Do not continue; troubleshoot keepalived first, for example the firewall and selinux, the status of haproxy and keepalived, and the listening ports:

  • On all nodes, the firewall must be disabled and inactive: systemctl status firewalld
  • On all nodes, SELinux must be disabled: getenforce
  • On the master nodes, check the haproxy and keepalived status: systemctl status keepalived haproxy
  • On the master nodes, check the listening ports: netstat -lntp

If all of the above are fine, confirm:

  1. Whether these are public cloud machines

  2. Whether these are private cloud machines (such as OpenStack)

Most public clouds do not support keepalived, and private clouds may have similar restrictions; consult your private cloud administrator.
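
For convenience, the checks above can be run in one pass; this is a minimal sketch that only wraps the commands already listed (run on each master node):

#!/bin/bash
# Quick VIP troubleshooting checks
systemctl is-active firewalld            # expect: inactive
getenforce                               # expect: Disabled or Permissive
systemctl is-active keepalived haproxy   # expect: active active
netstat -lntp | grep 16443               # haproxy should be listening on 16443
ping -c 2 192.168.1.70                   # the VIP must respond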

# 3. Installing the Runtime

If you are installing a version below 1.24, either Docker or Containerd can be used; above 1.24, Containerd is the recommended Runtime, and Docker is no longer recommended.

# 3.1 Installing Containerd

Configure the installation repository on all nodes:

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Install docker-ce on all nodes (if it was installed previously, reinstall to update it):

# yum install docker-ce containerd -y

Docker itself does not need to be started; only Containerd needs to be configured and started.

First, configure the kernel modules Containerd requires (all nodes):

# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

Load the modules on all nodes:

# modprobe -- overlay
# modprobe -- br_netfilter

Configure the kernel parameters Containerd requires on all nodes:

# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

Apply the kernel parameters on all nodes:

# sysctl --system

Generate the Containerd configuration file on all nodes:

# mkdir -p /etc/containerd
# containerd config default | tee /etc/containerd/config.toml

Change Containerd's Cgroup driver and Pause image settings on all nodes:

sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
sed -i 's#k8s.gcr.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g'  /etc/containerd/config.toml
sed -i 's#registry.gcr.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g'  /etc/containerd/config.toml
sed -i 's#registry.k8s.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g'  /etc/containerd/config.toml
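
A quick way to confirm both edits took effect (optional; sandbox_image is the config key that holds the Pause image):

grep 'SystemdCgroup' /etc/containerd/config.toml   # expect: SystemdCgroup = true
grep 'sandbox_image' /etc/containerd/config.toml   # expect the aliyuncs pause image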

Start Containerd on all nodes and enable it at boot:

# systemctl daemon-reload
# systemctl enable --now containerd

Configure the runtime endpoint for the crictl client on all nodes (optional):

# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
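
With crictl configured, a quick sanity check (both commands talk to Containerd through the socket configured above):

# crictl info      # prints the runtime status as JSON
# crictl images    # lists the images visible to the CRI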

# 4. Installing the Kubernetes Components

Configure the repository on all nodes (note: adjust the version number):

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/rpm/repodata/repomd.xml.key
EOF

First, check on the Master01 node what the latest Kubernetes version is:

# yum list kubeadm.x86_64 --showduplicates | sort -r

Install the latest 1.32 versions of kubeadm, kubelet, and kubectl on all nodes:

# yum install kubeadm-1.32* kubelet-1.32* kubectl-1.32* -y

Enable kubelet at boot on all nodes (the cluster has not been initialized yet, so there is no kubelet configuration file and kubelet cannot start; this is expected):

# systemctl daemon-reload
# systemctl enable --now kubelet

At this point kubelet cannot start and its logs will contain errors; this has no impact!
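
If you want to see the expected errors for yourself (purely informational; the exact wording varies by version, but it typically complains about the missing /var/lib/kubelet/config.yaml):

# journalctl -u kubelet --no-pager | tail -n 20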

# 5. Cluster Initialization

Perform the following on master01 (note the values called out in the comments):

vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.71
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  certSANs:
  - 192.168.1.70               # For a non-HA cluster, change this to the master's IP
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.1.70:16443 # For a non-HA cluster, change the IP to the master's IP and the port to 6443
controllerManager: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.32.3    # Change this version to match the output of kubeadm version
networking:
  dnsDomain: cluster.local
  podSubnet: 172.16.0.0/16    # This subnet must not conflict with the service or node subnets
  serviceSubnet: 10.96.0.0/16 # This subnet must not conflict with the pod or node subnets
scheduler: {}

Update the kubeadm configuration file on the master01 node:

kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml

Copy the new.yaml file to the other master nodes:

for i in k8s-master02 k8s-master03; do scp new.yaml $i:/root/; done

Then pull the images in advance on all Master nodes to save initialization time (the other nodes need no configuration changes, not even IP addresses):

kubeadm config images pull --config /root/new.yaml 

The expected output looks like this (versions may differ):

[root@k8s-master02 ~]# kubeadm config images pull --config /root/new.yaml 
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.32.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.32.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.32.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.32.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.16-0

Initialize the Master01 node. Initialization generates the certificates and configuration files in the /etc/kubernetes directory; afterwards the other Master nodes simply join Master01:

kubeadm init --config /root/new.yaml  --upload-certs

A successful initialization prints a Token used by other nodes to join, so record the token generated by your own initialization:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

# Do not copy the command from this document; use the one generated by your own node
  kubeadm join 192.168.1.70:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:df72788de04bbc2e8fca70becb8a9e8503a962b5d7cd9b1842a0c39930d08c94 \
	--control-plane --certificate-key c595f7f4a7a3beb0d5bdb75d9e4eff0a60b977447e76c1d6885e82c3aa43c94c

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.70:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:df72788de04bbc2e8fca70becb8a9e8503a962b5d7cd9b1842a0c39930d08c94

Configure the environment variable on the Master01 node for accessing the Kubernetes cluster:

cat <<EOF >> /root/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc
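
As a quick sanity check, kubectl should now reach the API server through the VIP:

kubectl cluster-info
# the control plane should be reported at https://192.168.1.70:16443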

Check the node status from the Master01 node (NotReady at this point is expected):

# kubectl get node
NAME           STATUS     ROLES           AGE   VERSION
k8s-master01   NotReady   control-plane   24s   v1.32.3

With this installation method, all system components run as containers in the kube-system namespace. Check the Pod status (Pending at this point does not matter):

# kubectl get pods -n kube-system

# 5.1 Troubleshooting a Failed Initialization

If initialization fails, reset and initialize again with the commands below (do not run them if initialization succeeded):

kubeadm reset -f ; ipvsadm --clear  ; rm -rf ~/.kube

If initialization fails repeatedly, check the system logs. CentOS/Rocky Linux log path: /var/log/messages; Ubuntu log path: /var/log/syslog:

tail -f /var/log/messages | grep -v "not found"

Common causes of failure:

  1. The Containerd configuration file was modified incorrectly; review the "Installing Containerd" section to verify it
  2. A new.yaml problem, such as forgetting to change port 16443 to 6443 for a non-HA cluster
  3. A new.yaml problem, such as overlapping subnets causing IP address conflicts
  4. The VIP is unreachable, so initialization cannot succeed; in that case the messages log will contain VIP timeout errors

# 5.2 Highly Available Masters

Join the other masters to the cluster by running the following on master02 and master03 respectively. (Never run it on master01 again, and do not copy the command from this document; use the one produced by your own master01 initialization.)

kubeadm join 192.168.1.70:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:df72788de04bbc2e8fca70becb8a9e8503a962b5d7cd9b1842a0c39930d08c94 \
	--control-plane --certificate-key c595f7f4a7a3beb0d5bdb75d9e4eff0a60b977447e76c1d6885e82c3aa43c94c

Check the current status (NotReady does not matter):

# kubectl get node
NAME           STATUS     ROLES           AGE     VERSION
k8s-master01   NotReady   control-plane   4m23s   v1.32.3
k8s-master02   NotReady   control-plane   66s     v1.32.3
k8s-master03   NotReady   control-plane   14s     v1.32.3

# 5.3 Handling Token Expiry

Note: the following steps are only needed if the Token produced by the init command above has expired; if it has not expired, just join directly.

After the Token expires, generate a new one:

kubeadm token create --print-join-command

Masters also need a new --certificate-key:

kubeadm init phase upload-certs  --upload-certs
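
Combine the outputs of the two commands above into the control-plane join command. For example (the token, hash, and key below are placeholders; substitute the values printed on your own node):

kubeadm join 192.168.1.70:16443 --token <new-token> \
	--discovery-token-ca-cert-hash sha256:<ca-cert-hash> \
	--control-plane --certificate-key <new-certificate-key>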

# 6. Configuring the Worker Nodes

Worker nodes mainly host business applications. In production, Master nodes should not run Pods other than the system components; in test environments, Masters may be allowed to run Pods to save resources.

kubeadm join 192.168.1.70:16443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:377702f508fe70b9d8ab68beccaa9af1b4609b754e4cc2fcc6185974e1d620b5

After all nodes have joined, check the cluster status (NotReady does not matter):

# kubectl get node
NAME           STATUS     ROLES           AGE     VERSION
k8s-master01   NotReady   control-plane   4m23s   v1.32.3
k8s-master02   NotReady   control-plane   66s     v1.32.3
k8s-master03   NotReady   control-plane   14s     v1.32.3
k8s-node01     NotReady   <none>          13s     v1.32.3
k8s-node02     NotReady   <none>          10s     v1.32.3

# 7. Installing the Calico Component

On all nodes, prevent NetworkManager from managing Calico's network interfaces to avoid conflicts or interference:

cat >>/etc/NetworkManager/conf.d/calico.conf<<EOF
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
EOF
systemctl daemon-reload
systemctl restart NetworkManager

The following steps are performed only on master01 (the .x does not need to be changed):

cd /root/k8s-ha-install && git checkout manual-installation-v1.32.x && cd calico/

Set the Pod subnet:

POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`

sed -i "s#POD_CIDR#${POD_SUBNET}#g" calico.yaml
kubectl apply -f calico.yaml
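
It can take a few minutes for the Calico pods to pull their images and become Ready; the rollout can be watched like this (press Ctrl-C to stop):

kubectl get pods -n kube-system -w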

Check the container and node status:

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-6f497d8478-v2q8c   1/1     Running   0          24h
calico-node-7mzmb                          1/1     Running   0          24h
calico-node-ljqnl                          1/1     Running   0          24h
calico-node-njqlb                          1/1     Running   0          24h
calico-node-ph4m4                          1/1     Running   0          24h
calico-node-rx8rl                          1/1     Running   0          24h
coredns-76fccbbb6b-76559                   1/1     Running   0          24h
coredns-76fccbbb6b-hkvn7                   1/1     Running   0          24h
etcd-k8s-master01                          1/1     Running   0          24h
etcd-k8s-master02                          1/1     Running   0          24h
etcd-k8s-master03                          1/1     Running   0          24h
kube-apiserver-k8s-master01                1/1     Running   0          24h
kube-apiserver-k8s-master02                1/1     Running   0          24h
kube-apiserver-k8s-master03                1/1     Running   0          24h
kube-controller-manager-k8s-master01       1/1     Running   0          24h
kube-controller-manager-k8s-master02       1/1     Running   0          24h
kube-controller-manager-k8s-master03       1/1     Running   0          24h
kube-proxy-9dtz4                           1/1     Running   0          24h
kube-proxy-jh7rl                           1/1     Running   0          24h
kube-proxy-jvvwt                           1/1     Running   0          24h
kube-proxy-sh89l                           1/1     Running   0          24h
kube-proxy-t2j49                           1/1     Running   0          24h
kube-scheduler-k8s-master01                1/1     Running   0          24h
kube-scheduler-k8s-master02                1/1     Running   0          24h
kube-scheduler-k8s-master03                1/1     Running   0          24h
metrics-server-7d9d8df576-jgnp2            1/1     Running   0          24h

All nodes now change to the Ready state:

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
k8s-master01   Ready    control-plane   24h   v1.32.3
k8s-master02   Ready    control-plane   24h   v1.32.3
k8s-master03   Ready    control-plane   24h   v1.32.3
k8s-node01     Ready    <none>          24h   v1.32.3
k8s-node02     Ready    <none>          24h   v1.32.3

# 8. Deploying Metrics

In newer versions of Kubernetes, system resource metrics are collected by Metrics-server, which gathers memory, disk, CPU, and network usage for nodes and Pods.

Copy front-proxy-ca.crt from the Master01 node to all Node nodes:

scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node01:/etc/kubernetes/pki/front-proxy-ca.crt
scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node02:/etc/kubernetes/pki/front-proxy-ca.crt   # repeat for any additional nodes

The following operations are all performed on the master01 node:

Install the metrics server:

cd /root/k8s-ha-install/kubeadm-metrics-server

# kubectl  create -f comp.yaml 
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created

Check the status:

[root@k8s-master01 ~]# kubectl get po -n kube-system -l k8s-app=metrics-server
NAME                              READY   STATUS    RESTARTS   AGE
metrics-server-7d9d8df576-jgnp2   1/1     Running   0          24h

Once the Pod is 1/1 Running, check node and Pod resource usage:

[root@k8s-master01 ~]#  kubectl top node
NAME           CPU(cores)   CPU(%)   MEMORY(bytes)   MEMORY(%)   
k8s-master01   132m         3%       932Mi           5%          
k8s-master02   131m         3%       845Mi           5%          
k8s-master03   148m         3%       912Mi           5%          
k8s-node01     54m          1%       600Mi           3%          
k8s-node02     49m          1%       602Mi           3%          
[root@k8s-master01 ~]#  kubectl top po -A
NAMESPACE              NAME                                         CPU(cores)   MEMORY(bytes)   
ingress-nginx          ingress-nginx-controller-5v9gl               2m           98Mi            
ingress-nginx          ingress-nginx-controller-r978m               1m           104Mi           
krm                    krm-backend-d7ff675d8-vmt9z                  1m           21Mi            
krm                    krm-frontend-588ffd677b-c2pgj                1m           4Mi             
krm                    nginx-574cf48959-vcfjs                       0m           2Mi             
kube-system            calico-kube-controllers-6f497d8478-v2q8c     6m           17Mi            
kube-system            calico-node-7mzmb                            16m          176Mi           
kube-system            calico-node-ljqnl                            15m          182Mi           
kube-system            calico-node-njqlb                            19m          180Mi           
kube-system            calico-node-ph4m4                            15m          178Mi           
kube-system            calico-node-rx8rl                            17m          180Mi           
kube-system            coredns-76fccbbb6b-76559                     2m           16Mi            
kube-system            coredns-76fccbbb6b-hkvn7                     2m           16Mi            
kube-system            etcd-k8s-master01                            22m          86Mi            
kube-system            etcd-k8s-master02                            27m          84Mi            
kube-system            etcd-k8s-master03                            22m          84Mi            
kube-system            kube-apiserver-k8s-master01                  22m          267Mi           
kube-system            kube-apiserver-k8s-master02                  20m          242Mi           
kube-system            kube-apiserver-k8s-master03                  18m          241Mi           
kube-system            kube-controller-manager-k8s-master01         6m           69Mi            
kube-system            kube-controller-manager-k8s-master02         2m           21Mi            
kube-system            kube-controller-manager-k8s-master03         1m           19Mi            
kube-system            kube-proxy-9dtz4                             11m          30Mi            
kube-system            kube-proxy-jh7rl                             1m           27Mi            
kube-system            kube-proxy-jvvwt                             17m          29Mi            
kube-system            kube-proxy-sh89l                             1m           29Mi            
kube-system            kube-proxy-t2j49                             16m          29Mi            
kube-system            kube-scheduler-k8s-master01                  6m           25Mi            
kube-system            kube-scheduler-k8s-master02                  6m           25Mi            
kube-system            kube-scheduler-k8s-master03                  6m           25Mi            
kube-system            metrics-server-7d9d8df576-jgnp2              2m           26Mi            
kubernetes-dashboard   dashboard-metrics-scraper-69b4796d9b-klnwr   1m           19Mi            
kubernetes-dashboard   kubernetes-dashboard-778584b9dd-pd5ln        1m           31Mi  

# 9. Deploying the Dashboard

# 9.1 Installing the Dashboard

The Dashboard displays the various resources in the cluster; it can also be used to view Pod logs in real time and execute commands inside containers.

cd /root/k8s-ha-install/dashboard/

[root@k8s-master01 dashboard]# kubectl  create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

# 9.2 Logging in to the Dashboard

Add the following startup parameters to the Google Chrome launcher to work around the certificate error that otherwise blocks access to the Dashboard:

--test-type --ignore-certificate-errors


Change the dashboard's svc type to NodePort:

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard


Change ClusterIP to NodePort (skip this step if it is already NodePort).
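
Equivalently, the change can be made non-interactively with kubectl patch (same effect as the manual edit above):

kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'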

Check the port number:

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.96.139.11   <none>        443:32409/TCP   24h

Using your instance's port, the dashboard can be reached through the IP of any host running kube-proxy plus that port:

Access the Dashboard at https://192.168.1.71:32409 (replace the IP address and port with your own) and choose the token login method.


Create a login Token:

kubectl create token admin-user -n kube-system

Enter the token value into the token field and click Sign in to access the Dashboard.


# 10. [Must Read] Required Configuration Changes

Change kube-proxy to ipvs mode. The ipvs configuration was commented out when the cluster was initialized, so it must be changed manually:

Run on the master01 node:

kubectl edit cm kube-proxy -n kube-system
# find the mode field and set it to:
mode: ipvs

Roll out the updated Kube-Proxy Pods:

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

Verify the Kube-Proxy mode:

[root@k8s-master01]# curl 127.0.0.1:10249/proxyMode
ipvs
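
You can also inspect the IPVS virtual servers that kube-proxy has programmed; addresses from the Service subnet (for example 10.96.0.1) should appear in the list:

ipvsadm -Ln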

# 11. [Must Read] Notes

Note: in a kubeadm-installed cluster, certificates are valid for one year by default. The master components kube-apiserver, kube-scheduler, kube-controller-manager, and etcd all run as containers, which can be seen with kubectl get po -n kube-system.

Unlike a binary installation, kubelet's configuration files are /etc/sysconfig/kubelet and /var/lib/kubelet/config.yaml; restart the kubelet process after modifying them.

The other components' configuration files are in the /etc/kubernetes/manifests directory, for example kube-apiserver.yaml. When such a yaml file is changed, kubelet automatically reloads the configuration, i.e. restarts the pod. Do not create these files a second time.

The kube-proxy configuration is in a configmap in the kube-system namespace and can be changed with

kubectl edit cm kube-proxy -n kube-system

After making the change, kube-proxy can be restarted with a patch:

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

After a kubeadm installation, master nodes do not allow Pods to be scheduled by default; the Taint can be removed as follows so that Pods can be deployed:

[root@k8s-master01 ~]# kubectl  taint node  -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane:NoSchedule-
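
To confirm the taint is gone, each control-plane node should now report Taints: <none>:

kubectl describe node k8s-master01 | grep Taints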

# 12. Configuring Registry Mirrors for Containerd

# vim /etc/containerd/config.toml
# Add the following registry mirror configuration
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
        endpoint = ["https://dockerproxy.com", "https://mirror.baidubce.com", "https://ccr.ccs.tencentyun.com", "https://docker.m.daocloud.io", "https://docker.nju.edu.cn", "https://docker.mirrors.ustc.edu.cn", "https://registry-1.docker.io", "https://hbv0b596.mirror.aliyuncs.com"]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
        endpoint = ["https://dockerproxy.com", "https://mirror.baidubce.com", "https://ccr.ccs.tencentyun.com", "https://docker.m.daocloud.io", "https://docker.nju.edu.cn", "https://docker.mirrors.ustc.edu.cn", "https://hbv0b596.mirror.aliyuncs.com", "https://k8s.m.daocloud.io", "https://hub-mirror.c.163.com"]

Restart Containerd on all nodes:

# systemctl daemon-reload
# systemctl restart containerd
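
To confirm containerd picked up the mirrors after the restart, dump the merged configuration and look for the mirror entries (a simple check):

# containerd config dump | grep -A 2 'registry.mirrors'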

# 13. Configuring Registry Mirrors for Docker

# sudo mkdir -p /etc/docker
# sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": [
	  "https://docker.credclouds.com",
	  "https://k8s.credclouds.com",
	  "https://quay.credclouds.com",
	  "https://gcr.credclouds.com",
	  "https://k8s-gcr.credclouds.com",
	  "https://ghcr.credclouds.com",
	  "https://do.nark.eu.org",
	  "https://docker.m.daocloud.io",
	  "https://docker.nju.edu.cn",
	  "https://docker.mirrors.sjtug.sjtu.edu.cn",
	  "https://docker.1panel.live",
	  "https://docker.rainbond.cc"
  ], 
  "exec-opts": ["native.cgroupdriver=systemd"] 
}
EOF

Restart Docker on all nodes:

# systemctl daemon-reload
# systemctl enable --now docker

Source: https://edu.51cto.com/course/23845.html
