1. Environment Preparation

IP Address         Host            Role                                      OS
192.168.100.153    k8s-master01    K8s cluster master node                   CentOS 7.9
192.168.100.154    k8s-master02    K8s cluster master node                   CentOS 7.9
192.168.100.155    k8s-master03    K8s cluster master node                   CentOS 7.9
192.168.100.156    k8s-node01      K8s cluster worker node                   CentOS 7.9
192.168.100.157    k8s-node02      K8s cluster worker node                   CentOS 7.9
192.168.100.99     (virtual IP)    keepalived VIP, configured on master01


1-1. Hostname Resolution

# All nodes: set each node's hostname to its own name (master01 shown here as the example),
# then add the entries below to /etc/hosts on every node.
[root@k8s-master01 ~]# hostnamectl set-hostname k8s-master01
[root@k8s-master01 ~]# vim /etc/hosts
192.168.100.153 k8s-master01
192.168.100.154 k8s-master02
192.168.100.155 k8s-master03
192.168.100.99 k8s-master-vip
192.168.100.156 k8s-node01
192.168.100.157 k8s-node02
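
The same entries must exist in /etc/hosts on every node. A minimal sketch for pushing the file from master01 to the rest of the cluster, assuming passwordless SSH for root has already been set up (otherwise edit the file on each node by hand):

[root@k8s-master01 ~]# for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp /etc/hosts $i:/etc/hosts; done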

1-2. CentOS Yum Repository Configuration

# All nodes:
[root@k8s-master01 ~]#curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-master01 ~]#yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master01 ~]#yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

1-3. Install Required Tools

# All nodes:
[root@k8s-master01 ~]#yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y

1-4. Disable the Firewall and SELinux

# All nodes:
[root@k8s-master01 ~]#systemctl disable --now firewalld
[root@k8s-master01 ~]#systemctl disable --now dnsmasq
[root@k8s-master01 ~]#systemctl disable --now NetworkManager
[root@k8s-master01 ~]#setenforce 0
[root@k8s-master01 ~]#sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
[root@k8s-master01 ~]#sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
[root@k8s-master01 ~]#cat /etc/sysconfig/selinux
[root@k8s-master01 ~]#cat /etc/selinux/config

1-5. Disable Swap

# All nodes:
[root@k8s-master01 ~]#swapoff -a && sysctl -w vm.swappiness=0
[root@k8s-master01 ~]#sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
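
An optional sanity check that swap is really off and will stay off after a reboot: swapon -s should print nothing, and the swap line in /etc/fstab should now start with #.

[root@k8s-master01 ~]# swapon -s
[root@k8s-master01 ~]# free -h | grep -i swap
[root@k8s-master01 ~]# grep swap /etc/fstab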

1-6. Time Synchronization

# All nodes:
[root@k8s-master01 ~]#ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
[root@k8s-master01 ~]#echo 'Asia/Shanghai' >/etc/timezone
[root@k8s-master01 ~]#yum install -y chrony
[root@k8s-master01 ~]#systemctl restart chronyd
[root@k8s-master01 ~]#systemctl enable --now chronyd
[root@k8s-master01 ~]#chronyc sources
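
A quick way to confirm the time zone change took effect and that chrony is actually synchronized (look for "Leap status : Normal" and a small "System time" offset):

[root@k8s-master01 ~]# timedatectl | grep -i 'time zone'
[root@k8s-master01 ~]# chronyc tracking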

1-7. Configure Resource Limits on All Nodes

# All nodes:
[root@k8s-master01 ~]#ulimit -SHn 65535

[root@k8s-master01 ~]# vim /etc/security/limits.conf # append at the end of the file
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
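
Note that ulimit -SHn only affects the current shell; the limits.conf entries apply to new login sessions. After opening a new SSH session, the values can be checked with:

[root@k8s-master01 ~]# ulimit -Sn    # expected: 65536
[root@k8s-master01 ~]# ulimit -Hn    # expected: 131072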

1-8. System Update and Kernel Upgrade

# All nodes
[root@k8s-master01 ~]#yum update -y --exclude=kernel*
[root@k8s-master01 ~]#wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
[root@k8s-master01 ~]#wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
[root@k8s-master01 ~]# yum localinstall -y kernel-ml*

# Change the default boot kernel
[root@k8s-master01 ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
[root@k8s-master01 ~]# grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# Check that the default kernel is 4.19
[root@k8s-master01 ~]# grubby --default-kernel
/boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64
[root@k8s-master01 ~]# reboot

[root@k8s-master01 ~]# uname -r
4.19.12-1.el7.elrepo.x86_64

1-9. Install ipvsadm

# All nodes
[root@k8s-master01 ~]# yum install ipvsadm ipset sysstat conntrack libseccomp -y

[root@k8s-master01 ~]# modprobe -- ip_vs
[root@k8s-master01 ~]# modprobe -- ip_vs_rr
[root@k8s-master01 ~]# modprobe -- ip_vs_wrr
[root@k8s-master01 ~]# modprobe -- ip_vs_sh
[root@k8s-master01 ~]# modprobe -- nf_conntrack


[root@k8s-master01 ~]# vim /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

[root@k8s-master01 ~]# systemctl enable --now systemd-modules-load.service
[root@k8s-master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack

1-10. Enable Kernel Parameters Required by Kubernetes

# All nodes

# Enable IP forwarding and other kernel parameters the cluster needs
[root@k8s-master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
net.ipv4.conf.all.route_localnet = 1

vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

[root@k8s-master01 ~]# sysctl --system

# After configuring the kernel parameters on all nodes, reboot and confirm the modules are still loaded after the restart
[root@k8s-master01 ~]# reboot
[root@k8s-master01 ~]# lsmod | grep --color=auto -e ip_vs -e nf_conntrack
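
It is also worth spot-checking a few of the sysctl values after the reboot; sysctl accepts several keys at once:

[root@k8s-master01 ~]# sysctl net.ipv4.ip_forward net.netfilter.nf_conntrack_max net.core.somaxconn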

2. Install containerd

# All nodes

# Install docker-ce 20.10 and containerd
[root@k8s-master01 ~]# yum install docker-ce-20.10.* docker-ce-cli-20.10.* containerd -y

# Configure the kernel modules required by containerd
[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

[root@k8s-master01 ~]# modprobe -- overlay
[root@k8s-master01 ~]# modprobe -- br_netfilter

# Configure the kernel parameters required by containerd
[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

[root@k8s-master01 ~]# sysctl --system

# Generate containerd's default configuration file
[root@k8s-master01 ~]# mkdir -p /etc/containerd
[root@k8s-master01 ~]# containerd config default | tee /etc/containerd/config.toml
# Change containerd's cgroup driver to systemd and switch the pause image to the Aliyun mirror:

[root@k8s-master01 ~]# vim /etc/containerd/config.toml
......
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8" # change this line to the Aliyun address (around line 61 of the file)
......
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true # change false to true (around line 125 of the file)
......


[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now containerd

# Point the crictl client at the containerd runtime socket
[root@k8s-master01 ~]# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
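
A quick sanity check that containerd is running, that the SystemdCgroup edit was saved, and that the daemon answers on its socket (ctr ships with containerd; crictl will pick up the configuration above once it is installed):

[root@k8s-master01 ~]# systemctl is-active containerd
[root@k8s-master01 ~]# grep SystemdCgroup /etc/containerd/config.toml
[root@k8s-master01 ~]# ctr version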

3. Generate Certificates

Note: the version installed here is 1.25.4. Download the latest 1.25.x release from the changelog page below (open it, click "Server Binaries", and pick the linux/amd64 server package, matching the wget command in the next step):

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md

3-1. Download Components

# master01:

# Download the Kubernetes server binaries
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.25.4/kubernetes-server-linux-amd64.tar.gz

# Download the etcd release
[root@k8s-master01 ~]#wget https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz

# Clone k8s-ha-install
[root@k8s-master01 ~]# cd /root/ ; git clone https://github.com/dotbalo/k8s-ha-install.git
# If the clone fails, use https://gitee.com/dukuan/k8s-ha-install.git instead


# Unpack the Kubernetes server binaries
[root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
# Note: --strip-components=3 strips the first three directory levels from the archive paths

# Unpack the etcd binaries
[root@k8s-master01 ~]# tar -zxvf etcd-v3.5.4-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.4-linux-amd64/etcd{,ctl}

[root@k8s-master01 ~]# ls /usr/local/bin/
etcd kube-apiserver kubectl kube-proxy
etcdctl kube-controller-manager kubelet kube-scheduler

[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.25.4
[root@k8s-master01 ~]# etcdctl version
etcdctl version: 3.5.4
API version: 3.5

# Send the binaries to the other nodes
[root@k8s-master01 ~]# for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do
> scp /usr/local/bin/kube{let,ctl,-apiserver,-proxy,-scheduler,-controller-manager} $i:/usr/local/bin/; scp /usr/local/bin/etcd* $i:/usr/local/bin/
> done
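
Optionally confirm that the binaries actually landed on every node, assuming passwordless SSH (each node should list the copied kube* and etcd* files):

[root@k8s-master01 ~]# for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do echo "== $i =="; ssh $i 'ls /usr/local/bin/kube* /usr/local/bin/etcd*'; done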

# Create the /opt/cni/bin directory on all nodes
[root@k8s-master01 ~]# mkdir -p /opt/cni/bin

3-2. Download the Certificate Generation Tools

[root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl

[root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson

[root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssl-certinfo_1.6.1_linux_amd64" -O /usr/local/bin/cfssl-certinfo

[root@k8s-master01 ~]# chmod +x /usr/local/bin/cfssl*
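
A quick check that the cfssl tools are on the PATH and executable:

[root@k8s-master01 ~]# cfssl version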

3-3. etcd Certificates

# All nodes
[root@k8s-master01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master01 ~]# mkdir /etc/kubernetes/pki -p


# master01:
[root@k8s-master01 ~]# cd k8s-ha-install/
[root@k8s-master01 k8s-ha-install]# git branch -a
* master
remotes/origin/HEAD -> origin/master
remotes/origin/manual-installation
remotes/origin/manual-installation-v1.16.x
remotes/origin/manual-installation-v1.17.x
remotes/origin/manual-installation-v1.18.x
remotes/origin/manual-installation-v1.19.x
remotes/origin/manual-installation-v1.20.x
remotes/origin/manual-installation-v1.20.x-csi-hostpath
remotes/origin/manual-installation-v1.21.x
remotes/origin/manual-installation-v1.22.x
remotes/origin/manual-installation-v1.23.x
remotes/origin/manual-installation-v1.24.x
remotes/origin/manual-installation-v1.25.x
remotes/origin/master

# Switch to the 1.25.x branch (for other versions, switch to the matching branch; keep the ".x", do not replace it with a specific patch number)
[root@k8s-master01 k8s-ha-install]# git checkout manual-installation-v1.25.x
Branch manual-installation-v1.25.x set up to track remote branch manual-installation-v1.25.x from origin.
Switched to a new branch 'manual-installation-v1.25.x'
[root@k8s-master01 k8s-ha-install]# cd pki
[root@k8s-master01 pki]# ls
admin-csr.json ca-csr.json front-proxy-ca-csr.json kube-proxy-csr.json
apiserver-csr.json etcd-ca-csr.json front-proxy-client-csr.json manager-csr.json
ca-config.json etcd-csr.json kubelet-csr.json scheduler-csr.json


# Generate the etcd CA certificate and its key
[root@k8s-master01 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca


[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.100.153,192.168.100.154,192.168.100.155 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd

2022/11/16 19:45:32 [INFO] generate received request
2022/11/16 19:45:32 [INFO] received CSR
2022/11/16 19:45:32 [INFO] generating key: rsa-2048
2022/11/16 19:45:33 [INFO] encoded CSR
2022/11/16 19:45:33 [INFO] signed certificate with serial number 497579505319136534258647128183377500491269693934
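
At this point /etc/etcd/ssl should hold six files (etcd-ca.csr, etcd-ca.pem, etcd-ca-key.pem, etcd.csr, etcd.pem, etcd-key.pem). If needed, the SANs baked into the server certificate can be inspected with openssl:

[root@k8s-master01 pki]# ls /etc/etcd/ssl/
[root@k8s-master01 pki]# openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'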

# Copy the certificates to the other master nodes
[root@k8s-master01 pki]# scp /etc/etcd/ssl/*.pem 192.168.100.154:/etc/etcd/ssl/
[root@k8s-master01 pki]# scp /etc/etcd/ssl/*.pem 192.168.100.155:/etc/etcd/ssl/

3-4. Generate Certificates for the Other Kubernetes Components

# master01

# Generate the Kubernetes CA certificate
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
2022/11/16 19:52:29 [INFO] generating a new CA key and certificate from CSR
2022/11/16 19:52:29 [INFO] generate received request
2022/11/16 19:52:29 [INFO] received CSR
2022/11/16 19:52:29 [INFO] generating key: rsa-2048
2022/11/16 19:52:29 [INFO] encoded CSR
2022/11/16 19:52:29 [INFO] signed certificate with serial number 430208719002673708340685834561638947457362482649

# 10.96.0.1 is the first IP of the Kubernetes Service network; if you change the Service CIDR, change this address accordingly.
# If this is not an HA cluster, use master01's IP instead of 192.168.100.99.
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.100.99,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.100.153,192.168.100.154,192.168.100.155 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver

2022/11/16 19:54:36 [INFO] generate received request
2022/11/16 19:54:36 [INFO] received CSR
2022/11/16 19:54:36 [INFO] generating key: rsa-2048
2022/11/16 19:54:36 [INFO] encoded CSR
2022/11/16 19:54:37 [INFO] signed certificate with serial number 148342655706954807225322374042758254794472502284
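
Before moving on, it is worth confirming that the apiserver certificate really contains the service IP (10.96.0.1), the VIP, and all three master IPs in its SANs; a missing entry here is a common cause of TLS errors later:

[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'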


# Generate the apiserver aggregation-layer (front-proxy) certificates, which back the requestheader-client-*/requestheader-allowed-* aggregator settings
[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
2022/11/16 19:54:59 [INFO] generating a new CA key and certificate from CSR
2022/11/16 19:54:59 [INFO] generate received request
2022/11/16 19:54:59 [INFO] received CSR
2022/11/16 19:54:59 [INFO] generating key: rsa-2048
2022/11/16 19:55:01 [INFO] encoded CSR
2022/11/16 19:55:01 [INFO] signed certificate with serial number 723218518093835071844654491175562369108910318661

[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
# Output (the warning below can be ignored)
2022/11/16 19:55:25 [INFO] generate received request
2022/11/16 19:55:25 [INFO] received CSR
2022/11/16 19:55:25 [INFO] generating key: rsa-2048
2022/11/16 19:55:26 [INFO] encoded CSR
2022/11/16 19:55:26 [INFO] signed certificate with serial number 103250417928382168375525563395057855193395592806
2022/11/16 19:55:26 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

# Generate the controller-manager certificate
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager


# Note: if this is not an HA cluster, change 192.168.100.99:8443 to master01's address, and change 8443 to the apiserver port (6443 by default)
# set-cluster: set a cluster entry

[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.99:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# set-context: set a context entry
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# set-credentials: set a user entry
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# use-context: make this context the default
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Generate the kube-scheduler certificate
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler


# Generate the kube-scheduler kubeconfig
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.99:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig


[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig

[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig


[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig


# Generate the admin (kubectl) certificate
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin



# Generate the admin kubeconfig
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.100.99:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig

[root@k8s-master01 pki]# kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig

[root@k8s-master01 pki]# kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig

[root@k8s-master01 pki]# kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
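
An optional check that the three kubeconfig files were written correctly; each should show one context bound to the kubernetes cluster and the expected user:

[root@k8s-master01 pki]# kubectl config get-contexts --kubeconfig=/etc/kubernetes/admin.kubeconfig
[root@k8s-master01 pki]# kubectl config get-contexts --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
[root@k8s-master01 pki]# kubectl config get-contexts --kubeconfig=/etc/kubernetes/scheduler.kubeconfig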


# Create the ServiceAccount key pair (used to sign and verify service account tokens)
[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048

[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

[root@k8s-master01 pki]# ls /etc/kubernetes/pki/ | wc -l
23

# Send the certificates to the other master nodes
[root@k8s-master01 pki]# scp /etc/kubernetes/pki/* 192.168.100.154:/etc/kubernetes/pki/
[root@k8s-master01 pki]# scp /etc/kubernetes/pki/* 192.168.100.155:/etc/kubernetes/pki/
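
A rough way to confirm the copy succeeded is to compare the file count on the other masters with the 23 files counted above, assuming passwordless SSH:

[root@k8s-master01 pki]# for i in 192.168.100.154 192.168.100.155; do echo "== $i =="; ssh $i 'ls /etc/kubernetes/pki/ | wc -l'; done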