# Installing a Highly Available K8s Cluster from Binaries
# 1. Basic Configuration
# 1.1 Basic Environment Configuration

| Hostname | IP Address | Notes |
|---|---|---|
| k8s-master01 ~ 03 | 192.168.1.71 ~ 73 | 3 master nodes |
| / | 192.168.1.70 | keepalived VIP (does not occupy a machine) |
| k8s-node01 ~ 02 | 192.168.1.74 ~ 75 | 2 worker nodes |

Replace these address ranges consistently with your own. The Pod CIDR, the Service CIDR, and the host network must not overlap!

| Configuration | Notes |
|---|---|
| OS version | Rocky Linux 8/9 |
| Containerd | latest |
| Pod CIDR | 172.16.0.0/16 |
| Service CIDR | 10.96.0.0/16 |

Change the hostname on all nodes (adjust it per node):
hostnamectl set-hostname k8s-master01
All nodes configure hosts; edit /etc/hosts as follows:
[root@k8s-master01 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.71 k8s-master01
192.168.1.72 k8s-master02
192.168.1.73 k8s-master03
192.168.1.74 k8s-node01
192.168.1.75 k8s-node02
All nodes configure the yum repositories:
# Configure the base repos
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
-i.bak \
/etc/yum.repos.d/*.repo
yum makecache
Install the required tools on all nodes:
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git rsyslog -y
All nodes disable firewalld, selinux, dnsmasq, and swap, and enable rsyslog. Configure the servers as follows:
systemctl disable --now firewalld
systemctl disable --now dnsmasq
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
systemctl enable --now rsyslog
Disable the swap partition on all nodes:
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
Install ntpdate on all nodes:
sudo dnf install epel-release -y
sudo dnf config-manager --set-enabled epel
sudo dnf install ntpsec -y
All nodes synchronize time and set the Shanghai time zone:
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
# Add to crontab
crontab -e
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com
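If you prefer not to open an interactive editor, the same cron entry can be appended non-interactively (an equivalent sketch of the step above; it assumes root's crontab):
(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -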
Configure limits on all nodes:
ulimit -SHn 65535
vim /etc/security/limits.conf
# Append the following at the end
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
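Instead of editing with vim, the same lines can be appended with a here-document (an equivalent sketch of the step above):
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF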
Upgrade the system on all nodes:
yum update -y
Set up passwordless SSH from Master01 to the other nodes. The configuration files and certificates generated during the installation are all created on Master01, and the cluster is also managed from Master01:
ssh-keygen -t rsa
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
Note: in a public cloud environment, you may need to place kubectl on a non-Master node.
Master01 downloads all of the installation source files:
cd /root/ ; git clone https://gitee.com/chinagei/k8s-ha-install
# 1.2 Kernel Configuration
Install ipvsadm on all nodes:
yum install ipvsadm ipset sysstat conntrack libseccomp -y
Load the ipvs modules on all nodes:
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
All nodes create ipvs.conf and configure the modules to load at boot:
vim /etc/modules-load.d/ipvs.conf
# Add the following
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
Then, on all nodes, run systemctl enable --now systemd-modules-load.service (errors here can be ignored):
systemctl enable --now systemd-modules-load.service
Apply the kernel tuning parameters on all nodes:
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
Apply the configuration on all nodes:
sysctl --system
After the kernel configuration is done, reboot all nodes and then verify that the kernel modules are loaded automatically:
reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
# 2. Installing the High-Availability Components
Note: if you are not installing a highly available cluster, haproxy and keepalived are not needed.
Note: on public clouds, use the cloud provider's load balancer (for example Alibaba Cloud SLB/NLB or Tencent Cloud CLB) instead of haproxy and keepalived, because most public clouds do not support keepalived.
All Master nodes install HAProxy and KeepAlived via yum:
yum install keepalived haproxy -y
All Master nodes configure HAProxy; pay attention to the IP addresses marked with comments:
[root@k8s-master01 etc]# mkdir /etc/haproxy
[root@k8s-master01 etc]# vim /etc/haproxy/haproxy.cfg
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:8443 # HAProxy listen port
bind 127.0.0.1:8443 # HAProxy listen port
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-master01 192.168.1.71:6443 check # API Server IP address
server k8s-master02 192.168.1.72:6443 check # API Server IP address
server k8s-master03 192.168.1.73:6443 check # API Server IP address
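Before starting the service, you can check the configuration syntax (an optional step, not in the original text):
haproxy -c -f /etc/haproxy/haproxy.cfg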
All Master nodes configure KeepAlived; pay attention to the values marked with comments.
Master01 configuration:
[root@k8s-master01 etc]# mkdir /etc/keepalived
[root@k8s-master01 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens160 # NIC name
mcast_src_ip 192.168.1.71 # k8s-master01 IP address
virtual_router_id 51
priority 101
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.1.70 # VIP address
}
track_script {
chk_apiserver
}
}
Master02 configuration:
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens160 # NIC name
mcast_src_ip 192.168.1.72 # k8s-master02 IP address
virtual_router_id 51
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.1.70 # VIP address
}
track_script {
chk_apiserver
}
}
Master03 configuration:
# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens160 # NIC name
mcast_src_ip 192.168.1.73 # k8s-master03 IP address
virtual_router_id 51
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.1.70 # VIP address
}
track_script {
chk_apiserver
}
}
All master nodes configure the KeepAlived health check script:
[root@k8s-master01 keepalived]# vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
All master nodes make the health check script executable:
chmod +x /etc/keepalived/check_apiserver.sh
All master nodes start haproxy and keepalived:
[root@k8s-master01 keepalived]# systemctl daemon-reload
[root@k8s-master01 keepalived]# systemctl enable --now haproxy
[root@k8s-master01 keepalived]# systemctl enable --now keepalived
Important: if keepalived and haproxy are installed, you must test that keepalived works.
Test the VIP from all nodes:
[root@k8s-master01 ~]# ping 192.168.1.70 -c 4
PING 192.168.1.70 (192.168.1.70) 56(84) bytes of data.
64 bytes from 192.168.1.70: icmp_seq=1 ttl=64 time=0.464 ms
64 bytes from 192.168.1.70: icmp_seq=2 ttl=64 time=0.063 ms
64 bytes from 192.168.1.70: icmp_seq=3 ttl=64 time=0.062 ms
64 bytes from 192.168.1.70: icmp_seq=4 ttl=64 time=0.063 ms
[root@k8s-master01 ~]# telnet 192.168.1.70 8443
Trying 192.168.1.70...
Connected to 192.168.1.70.
Escape character is '^]'.
Connection closed by foreign host.
If the VIP cannot be pinged, or telnet does not show the ']' escape prompt, treat the VIP as unusable and do not continue. Troubleshoot keepalived first, for example the firewall and selinux, the haproxy and keepalived status, and the listening ports:
- On all nodes the firewall must be disabled and inactive: systemctl status firewalld
- On all nodes selinux must be disabled: getenforce
- On master nodes check the haproxy and keepalived status: systemctl status keepalived haproxy
- On master nodes check the listening ports: netstat -lntp
If none of the above shows a problem, confirm whether:
- this is a public cloud machine
- this is a private cloud machine (such as OpenStack)
Public clouds generally do not support keepalived, and private clouds may have restrictions too; check with your private cloud administrator.
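As an extra check (not part of the original steps; it assumes the NIC is ens160 and the VIP is 192.168.1.70), you can confirm which master currently holds the VIP:
ip addr show ens160 | grep 192.168.1.70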
# 3. Runtime Installation
If the version being installed is below 1.24, either Docker or Containerd can be used. For 1.24 and later, Containerd is the recommended Runtime; Docker is no longer recommended.
# 3.1 Install Containerd
All nodes configure the installation repository:
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
All nodes install docker-ce and containerd (if they were installed before, reinstall to update them):
# yum install docker-ce containerd.io -y
There is no need to start Docker; only Containerd needs to be configured and started.
First configure the modules required by Containerd (all nodes):
# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
Load the modules on all nodes:
# modprobe -- overlay
# modprobe -- br_netfilter
On all nodes, configure the kernel parameters required by Containerd:
# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
Apply the kernel parameters on all nodes:
# sysctl --system
Generate the Containerd configuration file on all nodes:
# mkdir -p /etc/containerd
# containerd config default | tee /etc/containerd/config.toml
On all nodes, change Containerd's Cgroup driver and Pause image settings:
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
sed -i 's#k8s.gcr.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g' /etc/containerd/config.toml
sed -i 's#registry.gcr.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g' /etc/containerd/config.toml
sed -i 's#registry.k8s.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g' /etc/containerd/config.toml
Start Containerd on all nodes and enable it at boot:
# systemctl daemon-reload
# systemctl enable --now containerd
Configure the crictl client runtime endpoint on all nodes (optional):
# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
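As an optional sanity check (assuming containerd is already running), crictl should now be able to talk to the runtime:
crictl info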
# 4. Installing K8s and etcd
Master01 downloads the Kubernetes server package (replace the version with the latest one you see):
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.32.0/kubernetes-server-linux-amd64.tar.gz
The latest version can be found at: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/
The following operations are all performed on master01.
Download the etcd package from: https://github.com/etcd-io/etcd/releases/
[root@k8s-master01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.16/etcd-v3.5.16-linux-amd64.tar.gz
Extract the kubernetes files:
[root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
Extract the etcd files:
[root@k8s-master01 ~]# tar -zxvf etcd-v3.5.16-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.16-linux-amd64/etcd{,ctl}
Check the versions:
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.32.3
[root@k8s-master01 ~]# etcdctl version
etcdctl version: 3.5.16
API version: 3.5
Send the components to the other nodes:
MasterNodes='k8s-master02 k8s-master03'
WorkNodes='k8s-node01 k8s-node02'
for NODE in $MasterNodes; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
for NODE in $WorkNodes; do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
On Master01, switch to the 1.32.x branch (for other versions switch to the corresponding branch; use .x, there is no need to specify the exact patch version):
cd /root/k8s-ha-install && git checkout manual-installation-v1.32.x
# 5. Generating Certificates
This is the most critical part of a binary installation; a single mistake ruins everything, so make sure every step is correct.
Master01 downloads the certificate generation tools (if the download fails, they can be obtained from Baidu Netdisk instead):
wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
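A quick way to confirm the binaries downloaded correctly (optional):
cfssl version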
# 5.1 Etcd Certificates
All Master nodes create the etcd certificate directory:
mkdir /etc/etcd/ssl -p
All nodes create the kubernetes-related directories:
mkdir -p /etc/kubernetes/pki
Master01 generates the etcd certificates.
Generate the CSR (certificate signing request, which contains the domain names, organization, and unit) files:
[root@k8s-master01 pki]# cd /root/k8s-ha-install/pki
# Generate the etcd CA certificate and its key
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.1.71,192.168.1.72,192.168.1.73 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
Expected output:
[INFO] generate received request
[INFO] received CSR
[INFO] generating key: rsa-2048
[INFO] encoded CSR
[INFO] signed certificate with serial number 250230878926052708909595617022917808304837732033
Copy the certificates to the other master nodes:
MasterNodes='k8s-master02 k8s-master03'
for NODE in $MasterNodes; do
ssh $NODE "mkdir -p /etc/etcd/ssl"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
done
done
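Optionally (a sanity check that is not part of the original steps), verify that the etcd certificate contains all master hostnames and IPs in its Subject Alternative Names:
openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 "Subject Alternative Name"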
# 5.2 K8s Component Certificates
Master01 generates the Kubernetes CA certificate:
[root@k8s-master01 pki]# cd /root/k8s-ha-install/pki
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# 5.2.1 APIServer Certificate
Note: 10.96.0.1 is the first address of the k8s Service CIDR (10.96.0.0/16). If you need to change the Service CIDR, change 10.96.0.1 accordingly:
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.1.70,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.1.71,192.168.1.72,192.168.1.73 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
Generate the apiserver aggregation (front-proxy) certificates:
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
Output (ignore the warning):
2020/12/11 18:15:28 [INFO] generate received request
2020/12/11 18:15:28 [INFO] received CSR
2020/12/11 18:15:28 [INFO] generating key: rsa-2048
2020/12/11 18:15:28 [INFO] encoded CSR
2020/12/11 18:15:28 [INFO] signed certificate with serial number 597484897564859295955894546063479154194995827845
2020/12/11 18:15:28 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
# 5.2.2 ControllerManager
Generate the controller-manager certificate:
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
Note: adjust the IP address in the --server parameter (the VIP 192.168.1.70 and port 8443) to match your environment.
# set-cluster: define a cluster entry
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.70:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set-context: define a context
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set-credentials: define a user entry
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# use-context: set the default context
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# 5.2.3 Scheduler Certificate
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
Note: adjust the IP address in the --server parameter (the VIP 192.168.1.70 and port 8443) to match your environment.
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.70:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# 5.2.4 Generating the Admin Certificate
The admin certificate is used for the kubectl configuration file (/etc/kubernetes/admin.kubeconfig, which is later copied to ~/.kube/config).
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
Note: adjust the IP address in the --server parameter (the VIP 192.168.1.70 and port 8443) to match your environment.
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.70:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
# 5.2.5 Creating the ServiceAccount Key Pair
Create a key pair used to sign ServiceAccount tokens:
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
Output:
Generating RSA private key, 2048 bit long modulus (2 primes)
...................................................................................+++++
...............+++++
e is 65537 (0x010001)
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
Send the certificates and kubeconfigs to the other nodes:
for NODE in k8s-master02 k8s-master03; do
for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do
scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE};
done;
for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE};
done;
done
Check the certificate files:
[root@k8s-master01 pki]# ls /etc/kubernetes/pki/
admin.csr apiserver.csr ca.csr controller-manager.csr front-proxy-ca.csr front-proxy-client.csr sa.key scheduler-key.pem
admin-key.pem apiserver-key.pem ca-key.pem controller-manager-key.pem front-proxy-ca-key.pem front-proxy-client-key.pem sa.pub scheduler.pem
admin.pem apiserver.pem ca.pem controller-manager.pem front-proxy-ca.pem front-proxy-client.pem scheduler.csr
[root@k8s-master01 pki]# ls /etc/kubernetes/pki/ |wc -l
23
# 6. Kubernetes Component Configuration
# 6.1 Etcd Configuration
The etcd configuration is largely the same on every node; be sure to change the hostname and IP addresses in each Master node's etcd configuration.
# 6.1.1 Master01
# vim /etc/etcd/etcd.config.yml
name: 'k8s-master01' # k8s-master01 name
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.71:2380' # k8s-master01 IP
listen-client-urls: 'https://192.168.1.71:2379,http://127.0.0.1:2379' # k8s-master01 IP
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.71:2380' # k8s-master01 IP
advertise-client-urls: 'https://192.168.1.71:2379' # k8s-master01 IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.71:2380,k8s-master02=https://192.168.1.72:2380,k8s-master03=https://192.168.1.73:2380' # k8s-master01, k8s-master02, k8s-master03 IPs
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
# 6.1.2 Master02
# vim /etc/etcd/etcd.config.yml
name: 'k8s-master02' # k8s-master02 name
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.72:2380' # k8s-master02 IP
listen-client-urls: 'https://192.168.1.72:2379,http://127.0.0.1:2379' # k8s-master02 IP
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.72:2380' # k8s-master02 IP
advertise-client-urls: 'https://192.168.1.72:2379' # k8s-master02 IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.71:2380,k8s-master02=https://192.168.1.72:2380,k8s-master03=https://192.168.1.73:2380' # k8s-master01, k8s-master02, k8s-master03 IPs
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
# 6.1.3 Master03
# vim /etc/etcd/etcd.config.yml
name: 'k8s-master03' # k8s-master03 name
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.1.73:2380' # k8s-master03 IP
listen-client-urls: 'https://192.168.1.73:2379,http://127.0.0.1:2379' # k8s-master03 IP
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.1.73:2380' # k8s-master03 IP
advertise-client-urls: 'https://192.168.1.73:2379' # k8s-master03 IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.1.71:2380,k8s-master02=https://192.168.1.72:2380,k8s-master03=https://192.168.1.73:2380' # k8s-master01, k8s-master02, k8s-master03 IPs
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
# 6.1.4 Starting Etcd
All Master nodes create the etcd service and start it:
# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
All Master nodes create the etcd certificate directory for kubernetes, link the certificates, and start etcd:
mkdir /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
systemctl daemon-reload
systemctl enable --now etcd
Check the etcd status:
export ETCDCTL_API=3
etcdctl --endpoints="192.168.1.73:2379,192.168.1.72:2379,192.168.1.71:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
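You can also check member health with the same connection flags (an optional check):
etcdctl --endpoints="192.168.1.73:2379,192.168.1.72:2379,192.168.1.71:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table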
# 6.2 APIServer Configuration
# 6.2.1 Master01
Note: this document uses 10.96.0.0/16 as the k8s Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed:
[root@k8s-master01 pki]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.1.71 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.1.71:2379,https://192.168.1.72:2379,https://192.168.1.73:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
# 6.2.2 Master02
Note: this document uses 10.96.0.0/16 as the k8s Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed:
[root@k8s-master01 pki]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.1.72 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.1.71:2379,https://192.168.1.72:2379,https://192.168.1.73:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
# 6.2.3 Master03
Note: this document uses 10.96.0.0/16 as the k8s Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed:
[root@k8s-master01 pki]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.1.73 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.1.71:2379,https://192.168.1.72:2379,https://192.168.1.73:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
# 6.2.4 Starting the apiserver
All Master nodes enable and start kube-apiserver:
systemctl daemon-reload && systemctl enable --now kube-apiserver
Check the kube-apiserver status:
# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
Active: active (running) since Sat 2020-08-22 21:26:49 CST; 26s ago
The following messages in the system log can be ignored:
Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.004739 7450 clientconn.go:948] ClientConn switching balancer to "pick_first"
Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.004843 7450 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc011bd4c80, {CONNECTING <nil>}
Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.010725 7450 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc011bd4c80, {READY <nil>}
Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.011370 7450 controlbuf.go:508] transport: loopyWriter.run returning. Connection error: desc = "transport is closing"
# 6.3 ControllerManager
All Master nodes configure the kube-controller-manager service (the configuration is identical on all master nodes).
Note: this document uses 172.16.0.0/16 as the k8s Pod CIDR; it must not overlap with the host network or the k8s Service CIDR. Adjust as needed:
[root@k8s-master01 pki]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--authentication-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr=172.16.0.0/16 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
All Master nodes enable and start kube-controller-manager:
[root@k8s-master01 pki]# systemctl daemon-reload
[root@k8s-master01 pki]# systemctl enable --now kube-controller-manager
Created symlink /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service → /usr/lib/systemd/system/kube-controller-manager.service.
Check the status:
[root@k8s-master01 pki]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2020-12-11 20:53:05 CST; 8s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 7518 (kube-controller)
# 6.4 Scheduler
All Master nodes configure the kube-scheduler service (the configuration is identical on all master nodes):
[root@k8s-master01 pki]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--leader-elect=true \
--authentication-kubeconfig=/etc/kubernetes/scheduler.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/scheduler.kubeconfig \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
Start the scheduler:
[root@k8s-master01 pki]# systemctl daemon-reload
[root@k8s-master01 pki]# systemctl enable --now kube-scheduler
Created symlink /etc/systemd/system/multi-user.target.wants/kube-scheduler.service → /usr/lib/systemd/system/kube-scheduler.service.
[root@k8s-master01 pki]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2022-05-04 17:31:13 CST; 6s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 5815 (kube-scheduler)
Tasks: 9
Memory: 19.8M
# 7. TLS Bootstrapping Configuration
The bootstrap only needs to be created on Master01.
Note: adjust the IP address (the VIP 192.168.1.70 and port 8443) to match your environment.
cd /root/k8s-ha-install/bootstrap
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.70:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
[root@k8s-master01 bootstrap]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
You may only continue if the cluster status can be queried normally; otherwise stop and check whether any k8s component is failing (any result is acceptable; a slightly different output does not matter):
# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy ok
Create the bootstrap-related resources:
[root@k8s-master01 bootstrap]# kubectl create -f bootstrap.secret.yaml
secret/bootstrap-token-c8ad9c created
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
# 8. Node Configuration
# 8.1 Copying Certificates
Master01 copies the certificates to the other nodes:
cd /etc/kubernetes/
for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
ssh $NODE mkdir -p /etc/kubernetes/pki
for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
done
done
Output:
ca.pem 100% 1407 459.5KB/s 00:00
…
bootstrap-kubelet.kubeconfig 100% 2291 685.4KB/s 00:00
# 8.2 Kubelet Configuration
All nodes create the kubelet directories:
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
All nodes configure the kubelet service:
[root@k8s-master01 bootstrap]# vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
All nodes configure the kubelet service drop-in configuration file (this can also be written directly into kubelet.service):
# The Runtime is Containerd
# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
All nodes create the kubelet configuration file.
Note: if you changed the k8s Service CIDR, update clusterDNS in kubelet-conf.yml to the tenth address of your Service CIDR, for example 10.96.0.10:
[root@k8s-master01 bootstrap]# vim /etc/kubernetes/kubelet-conf.yml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
Start kubelet on all nodes:
systemctl daemon-reload
systemctl enable --now kubelet
At this point it is normal for the system log /var/log/messages to show only messages like the following; they will stop once Calico is installed:
Unable to update cni config: no networks found in /etc/cni/net.d
If there are many error messages, or a large number of messages you cannot explain, the kubelet configuration is wrong and needs to be checked.
Master01 checks the cluster status (Ready or NotReady are both fine at this point):
[root@k8s-master01 bootstrap]# kubectl get node
# 8.3 kube-proxy Configuration
Note: if this is not a highly available cluster, change 192.168.1.70:8443 to Master01's address and 8443 to the apiserver port (the default is 6443).
Generate the kube-proxy certificate; the following is only executed on Master01:
cd /root/k8s-ha-install/pki
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.70:8443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials system:kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context system:kube-proxy@kubernetes \
--cluster=kubernetes \
--user=system:kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context system:kube-proxy@kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Send the kubeconfig to the other nodes:
for NODE in k8s-master02 k8s-master03; do
scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
done
for NODE in k8s-node01 k8s-node02; do
scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
done
All nodes add the kube-proxy configuration and service file:
vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
If you changed the cluster's Pod CIDR, change clusterCIDR in kube-proxy.yaml to your own Pod CIDR:
vim /etc/kubernetes/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
Start kube-proxy on all nodes:
[root@k8s-master01 k8s-ha-install]# systemctl daemon-reload
[root@k8s-master01 k8s-ha-install]# systemctl enable --now kube-proxy
Created symlink /etc/systemd/system/multi-user.target.wants/kube-proxy.service → /usr/lib/systemd/system/kube-proxy.service.
At this point it is still normal for the system log /var/log/messages to show only messages like the following; they will stop once Calico is installed:
Unable to update cni config: no networks found in /etc/cni/net.d
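Since kube-proxy runs in ipvs mode, you can optionally confirm that IPVS rules have been programmed (for example an entry for the kubernetes Service IP 10.96.0.1:443); this check is not in the original text:
ipvsadm -Ln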
# 9. Installing Calico
The following steps are only executed on master01:
cd /root/k8s-ha-install/calico/
Change Calico's CIDR: replace the POD_CIDR placeholder with your own Pod CIDR:
sed -i "s#POD_CIDR#172.16.0.0/16#g" calico.yaml
Verify that the CIDR is your Pod CIDR: grep "IPV4POOL_CIDR" calico.yaml -A 1
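The original text does not show the apply command; assuming the manifest is applied in the usual way before checking pod status:
kubectl apply -f calico.yaml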
Check the pod and node status:
[root@k8s-master01 calico]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-66686fdb54-mk2g6 1/1 Running 1 (20s ago) 85s
calico-node-8fxqp 1/1 Running 0 85s
calico-node-8nkfl 1/1 Running 0 86s
calico-node-pmpf4 1/1 Running 0 86s
calico-node-vnlk7 1/1 Running 0 86s
calico-node-xpchb 1/1 Running 0 85s
calico-typha-67c6dc57d6-259t8 1/1 Running 0 86s
If a container is in an abnormal state, use kubectl describe or kubectl logs to inspect its logs:
- kubectl logs -f POD_NAME -n kube-system
- kubectl logs -f POD_NAME -c upgrade-ipam -n kube-system
# 10. Installing CoreDNS
cd /root/k8s-ha-install/
If you changed the k8s Service CIDR, set the CoreDNS Service IP to the tenth IP of the Service CIDR:
COREDNS_SERVICE_IP=`kubectl get svc | grep kubernetes | awk '{print $3}'`0
sed -i "s#KUBEDNS_SERVICE_IP#${COREDNS_SERVICE_IP}#g" CoreDNS/coredns.yaml
Install CoreDNS:
[root@k8s-master01 k8s-ha-install]# kubectl create -f CoreDNS/coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
# 11. Deploying Metrics Server
In recent Kubernetes versions, system resource metrics are collected with metrics-server, which gathers memory, disk, CPU, and network usage for nodes and Pods.
The following is executed on the master01 node; install metrics-server:
cd /root/k8s-ha-install/metrics-server
kubectl create -f .
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
Wait for metrics-server to start, then check its status:
# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 231m 5% 1620Mi 42%
k8s-master02 274m 6% 1203Mi 31%
k8s-master03 202m 5% 1251Mi 32%
k8s-node01 69m 1% 667Mi 17%
k8s-node02 73m 1% 650Mi 16%
If you see the following error, wait about 10 minutes and check again:
Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
# 12. Deploying the Dashboard
# 12.1 Installing the Dashboard
The Dashboard displays the various resources in the cluster; it can also be used to view Pod logs in real time and run commands inside containers.
cd /root/k8s-ha-install/dashboard/
[root@k8s-master01 dashboard]# kubectl create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
# 12.2 Logging in to the Dashboard
Add the following startup parameters to the Google Chrome launcher to work around the certificate error that otherwise blocks access to the Dashboard:
--test-type --ignore-certificate-errors
Change the Dashboard Service to NodePort:
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
Change the type from ClusterIP to NodePort (skip this step if it is already NodePort).
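A non-interactive alternative to editing the Service by hand (a sketch assuming the service name and namespace shown above):
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'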
Check the port number:
[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.96.139.11 <none> 443:32409/TCP 24h
Using your instance's port number, the Dashboard can be reached through the IP of any host running kube-proxy plus that port.
Access the Dashboard at https://192.168.1.71:32409 (replace the IP address and port with your own) and choose Token as the sign-in method.
Create a login token:
kubectl create token admin-user -n kube-system
Enter the token value into the token field and click Sign in to access the Dashboard.
# 14. Configuring Containerd Registry Mirrors
# vim /etc/containerd/config.toml
# Add the following registry mirror configuration
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint=["https://dockerproxy.com", "https://mirror.baidubce.com","https://ccr.ccs.tencentyun.com","https://docker.m.daocloud.io","https://docker.nju.edu.cn","https://docker.mirrors.ustc.edu.cn","https://registry-1.docker.io", "https://hbv0b596.mirror.aliyuncs.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
endpoint=["https://dockerproxy.com", "https://mirror.baidubce.com","https://ccr.ccs.tencentyun.com","https://docker.m.daocloud.io","https://docker.nju.edu.cn","https://docker.mirrors.ustc.edu.cn","https://hbv0b596.mirror.aliyuncs.com", "https://k8s.m.daocloud.io", "https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com"]
Restart Containerd on all nodes:
# systemctl daemon-reload
# systemctl restart containerd
# 15. Configuring Docker Registry Mirrors
# sudo mkdir -p /etc/docker
# sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": [
"https://docker.credclouds.com",
"https://k8s.credclouds.com",
"https://quay.credclouds.com",
"https://gcr.credclouds.com",
"https://k8s-gcr.credclouds.com",
"https://ghcr.credclouds.com",
"https://do.nark.eu.org",
"https://docker.m.daocloud.io",
"https://docker.nju.edu.cn",
"https://docker.mirrors.sjtug.sjtu.edu.cn",
"https://docker.1panel.live",
"https://docker.rainbond.cc"
],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
Restart Docker on all nodes:
# systemctl daemon-reload
# systemctl restart docker