A highly available Kubernetes cluster consists of multiple master (control-plane) nodes behind a load balancer, with a highly available distributed storage cluster (for example Ceph) attached on the outside, so that both compute and storage are highly available. etcd can also be split out and run as an external etcd cluster.

Stacked etcd cluster

[Figure: stacked etcd topology]

External etcd cluster

[Figure: external etcd topology]

Cluster plan

Host        IP address       Notes
client      192.168.2.10     Image registry
ELB         192.168.2.100    Load balancer; add a listener on port 6443 (see the sketch below this table)
x-k8s       192.168.2.11     Control-plane node
y-k8s       192.168.2.12     Control-plane node
z-k8s       192.168.2.13     Control-plane node
work-0001   192.168.2.21     Worker node
work-0002   192.168.2.22     Worker node
work-0003   192.168.2.23     Worker node
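The ELB entry assumes a managed cloud load balancer listening on 192.168.2.100:6443 and forwarding TCP to the three control-plane nodes. If none is available, a plain TCP pass-through proxy achieves the same result; the following is only a minimal HAProxy sketch (the frontend/backend names are illustrative, and 192.168.2.100 would have to be owned by the HAProxy host, e.g. via keepalived):

# /etc/haproxy/haproxy.cfg -- sketch of a TCP pass-through for the apiserver
frontend k8s-api
    bind 192.168.2.100:6443
    mode tcp
    default_backend k8s-masters
backend k8s-masters
    mode tcp
    balance roundrobin
    option tcp-check
    server x-k8s 192.168.2.11:6443 check
    server y-k8s 192.168.2.12:6443 check
    server z-k8s 192.168.2.13:6443 check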

 

Installation and deployment

1. Configure the package repository

[root@ecs-proxy s4]# rsync -av docker/ /var/localrepo/docker/
[root@ecs-proxy s4]# rsync -av kubernetes/packages/ /var/localrepo/k8s/
[root@ecs-proxy s4]# createrepo --update /var/localrepo/
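For the nodes to consume this repository, each one needs a matching repo definition. A minimal sketch follows; the baseurl and the 192.168.2.254 host are assumptions and depend on how /var/localrepo is actually published (e.g. over FTP or HTTP from ecs-proxy):

# /etc/yum.repos.d/local.repo -- sketch; adjust baseurl to your environment
[localrepo]
name=local repository
baseurl=ftp://192.168.2.254/localrepo
enabled=1
gpgcheck=0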

2. Set up the image registry

[root@ecs-proxy s4]# rsync -av kubernetes/init/v1.29.2.tar.xz 192.168.2.10:/root/
#---------------------------------------------------------------------------
[root@client ~]# vim /etc/hosts
192.168.2.10    client
[root@client ~]# dnf install -y docker-ce docker-distribution
[root@client ~]# vim /etc/docker/daemon.json
{
    "registry-mirrors":["http://client:5000"],
    "insecure-registries":["client:5000"]
}
[root@client ~]# systemctl enable --now docker docker-distribution
# Push the images to the registry
[root@client ~]# docker load -i v1.29.2.tar.xz
[root@client ~]# docker images --format '{{.Repository}}:{{.Tag}}' | while read i; do
    docker tag ${i} client:5000/k8s/${i##*/};
    docker push client:5000/k8s/${i##*/};
    docker rmi ${i} client:5000/k8s/${i##*/};
done
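docker-distribution exposes the standard registry v2 API, so a quick way to confirm the push worked is to list the repositories and the tags of one of them (kube-apiserver is used here only as an example repository name):

[root@client ~]# curl -s http://client:5000/v2/_catalog
[root@client ~]# curl -s http://client:5000/v2/k8s/kube-apiserver/tags/list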

3. Base environment

# Run on every node
[root@work ~]# sed '/swap/d' -i /etc/fstab
[root@work ~]# swapoff -a
[root@work ~]# dnf remove -y firewalld-*
[root@work ~]# vim /etc/hosts
192.168.2.10    client
192.168.2.11    x-k8s
192.168.2.12    y-k8s
192.168.2.13    z-k8s
192.168.2.21    work-0001
192.168.2.22    work-0002
192.168.2.23    work-0003
[root@work ~]# dnf install -y kubeadm kubelet kubectl containerd.io ipvsadm ipset iproute-tc
[root@work ~]# containerd config default >/etc/containerd/config.toml
[root@work ~]# vim /etc/containerd/config.toml
61:     sandbox_image = "client:5000/k8s/pause:3.9"
125:    SystemdCgroup = true
154:    # insert the following new lines
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["http://client:5000"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."client:5000"]
          endpoint = ["http://client:5000"]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."client:5000".tls]
          insecure_skip_verify = true
[root@work ~]# systemctl enable --now kubelet containerd
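Before initializing the cluster it is worth checking that containerd can actually pull from the private registry through the CRI socket. A sketch of such a test with crictl (installed as a dependency of kubeadm on most setups):

[root@work ~]# crictl --runtime-endpoint unix:///run/containerd/containerd.sock pull client:5000/k8s/pause:3.9
[root@work ~]# crictl --runtime-endpoint unix:///run/containerd/containerd.sock images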
# Load kernel modules
[root@work ~]# vim /etc/modules-load.d/containerd.conf
overlay
br_netfilter
xt_conntrack
[root@work ~]# systemctl start systemd-modules-load.service
# Set kernel parameters
[root@work ~]# vim /etc/sysctl.d/99-kubernetes-cri.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.netfilter.nf_conntrack_max = 1000000
[root@work ~]# sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
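A quick way to confirm that the modules loaded and the parameters took effect:

[root@work ~]# lsmod | grep -E 'overlay|br_netfilter|xt_conntrack'
[root@work ~]# sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables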

4. Initialize x-k8s

# Set up tab completion
[root@x-k8s ~]# source <(kubeadm completion bash|tee /etc/bash_completion.d/kubeadm)
[root@x-k8s ~]# source <(kubectl completion bash|tee /etc/bash_completion.d/kubectl)
[root@x-k8s ~]# kubeadm config print init-defaults >init.yaml
[root@x-k8s ~]# vim init.yaml
11: localAPIEndpoint:
12:   advertiseAddress: 192.168.2.11    # this node's IP
13:   bindPort: 6443                    # apiserver port
14: nodeRegistration:
15:   criSocket: unix:///run/containerd/containerd.sock    # runtime socket address
16:   imagePullPolicy: IfNotPresent
17:   name: x-k8s    # this node's name
18:   taints: null
19: ---
20: controlPlaneEndpoint: 192.168.2.100:6443    # new line: the VIP address
31: imageRepository: client:5000/k8s    # image registry address
32: kind: ClusterConfiguration
33: kubernetesVersion: 1.29.2           # image version
34: networking:
35:   dnsDomain: cluster.local
36:   podSubnet: 10.244.0.0/16          # new line: Pod subnet
37:   serviceSubnet: 10.245.0.0/16      # Service subnet
38: scheduler: {}
---     # everything below is newly added
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
mode: ipvs
ipvs:
  strictARP: true
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
[root@x-k8s ~]# kubeadm init --config=init.yaml
[root@x-k8s ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@x-k8s ~]# kubectl get nodes
NAME    STATUS     ROLES           AGE   VERSION
x-k8s   NotReady   control-plane   23s   v1.29.2
# Upload the certificates
[root@x-k8s ~]# kubeadm init --config=init.yaml phase upload-certs --upload-certs
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
<this certificate-key is used when joining the cluster>
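The bootstrap token printed by kubeadm init is only valid for 24 hours by default. If it has expired, a fresh join command and a fresh certificate key can be generated at any time; for control-plane joins, append --control-plane and --certificate-key <key> to the printed worker command:

[root@x-k8s ~]# kubeadm token create --print-join-command
[root@x-k8s ~]# kubeadm init phase upload-certs --upload-certs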

5. Initialize y-k8s

[root@y-k8s ~]# kubeadm join 192.168.2.100:6443 --control-plane \
                    --token <token> \
                    --discovery-token-ca-cert-hash sha256:<CA cert hash> \
                    --certificate-key <certificate key>

6. Initialize z-k8s

[root@z-k8s ~]# kubeadm join 192.168.2.100:6443 --control-plane \
                    --token <token> \
                    --discovery-token-ca-cert-hash sha256:<CA cert hash> \
                    --certificate-key <certificate key>

7. Worker nodes

# Run on every worker node
[root@work ~]# kubeadm join 192.168.2.100:6443 --token <token> \
                    --discovery-token-ca-cert-hash sha256:<CA cert hash>
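If a worker does not show up in kubectl get nodes after joining, the kubelet status and its recent logs on that worker are the first things to check:

[root@work ~]# systemctl is-active kubelet
[root@work ~]# journalctl -u kubelet --no-pager -n 20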

8. Client

[root@client ~]# dnf install -y kubectl
[root@client ~]# source <(kubectl completion bash|tee /etc/bash_completion.d/kubectl)
[root@client ~]# mkdir -p $HOME/.kube
[root@client ~]# rsync -av 192.168.2.11:/etc/kubernetes/admin.conf $HOME/.kube/config
[root@client ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@client ~]# kubectl get nodes
NAME        STATUS     ROLES           AGE     VERSION
work-0001   NotReady   <none>          78s     v1.29.2
work-0002   NotReady   <none>          72s     v1.29.2
work-0003   NotReady   <none>          67s     v1.29.2
x-k8s       NotReady   control-plane   18m     v1.29.2
y-k8s       NotReady   control-plane   9m20s   v1.29.2
z-k8s       NotReady   control-plane   5m47s   v1.29.2
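Because controlPlaneEndpoint was set to the VIP in init.yaml, the copied admin.conf points kubectl at 192.168.2.100:6443 rather than at a single master, so the client keeps working if one control-plane node is lost. This can be confirmed with:

[root@client ~]# kubectl cluster-info
[root@client ~]# kubectl config view -o jsonpath='{.clusters[0].cluster.server}'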

9. Network plugin

[root@ecs-proxy s4]# rsync -av kubernetes/plugins/calico 192.168.2.10:/root/
#----------------------------------------------------------------------------
[root@client ~]# docker load -i calico/calico.tar.xz
[root@client ~]# docker images --format '{{.Repository}}:{{.Tag}}' | while read i; do
    docker tag ${i} client:5000/plugins/${i##*/};
    docker push client:5000/plugins/${i##*/};
    docker rmi ${i} client:5000/plugins/${i##*/};
done
[root@client ~]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1client:5000/plugins/\3,' calico/calico.yaml
[root@client ~]# kubectl create -f calico/calico.yaml
[root@client ~]# kubectl get nodes
NAME        STATUS   ROLES           AGE     VERSION
work-0001   Ready    <none>          5m5s    v1.29.2
work-0002   Ready    <none>          4m59s   v1.29.2
work-0003   Ready    <none>          4m54s   v1.29.2
x-k8s       Ready    control-plane   22m     v1.29.2
y-k8s       Ready    control-plane   13m     v1.29.2
z-k8s       Ready    control-plane   9m34s   v1.29.2
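The Calico components themselves can also be checked directly; the label selectors below are the ones used in the stock calico.yaml manifest:

[root@client ~]# kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
[root@client ~]# kubectl -n kube-system get pods -l k8s-app=calico-kube-controllers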

Verify the cluster

# Install the etcd client tools
[root@client ~]# dnf install file:///root/etcd-3.3.11-2.el7.centos.x86_64.rpm
# Copy the client certificates
[root@client ~]# mkdir pki
[root@client ~]# rsync -av 192.168.2.11:/etc/kubernetes/pki/etcd/ca.crt pki/
[root@client ~]# rsync -av 192.168.2.11:/etc/kubernetes/pki/apiserver-etcd-client.* pki/
[root@client ~]# tree pki
pki
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.key
└── ca.crt
# Use API version 3
[root@client ~]# export ETCDCTL_API=3
# Check cluster membership
[root@client ~]# etcdctl --endpoints=https://192.168.2.11:2379 \
                         --cacert pki/ca.crt \
                         --cert pki/apiserver-etcd-client.crt \
                         --key pki/apiserver-etcd-client.key \
                         member list -w table
# Query the leader / endpoint status
[root@client ~]# etcdctl --endpoints=https://192.168.2.{11..13}:2379 \
                         --cacert pki/ca.crt \
                         --cert pki/apiserver-etcd-client.crt \
                         --key pki/apiserver-etcd-client.key \
                         endpoint status -w table
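In addition to member list and endpoint status (whose table includes an IS LEADER column identifying the current leader), the health of every endpoint can be probed the same way:

[root@client ~]# etcdctl --endpoints=https://192.168.2.{11..13}:2379 \
                         --cacert pki/ca.crt \
                         --cert pki/apiserver-etcd-client.crt \
                         --key pki/apiserver-etcd-client.key \
                         endpoint health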