Deploying a highly available Kubernetes cluster with kubeadm, keepalived, haproxy, and containerd
1. Operating system and software versions
[root@localhost ~]# rpm -qa |grep release
centos-release-8.2-2.2004.0.1.el8.x86_64
[root@localhost ~]# uname -r
4.18.0-193.el8.x86_64
2. Host plan
Host | Management IP | Service IP | Installed software |
kube-master1 | 192.168.1.21/24 | 192.168.8.10/24 | keepalived haproxy ipvsadm containerd kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 |
kube-master2 | 192.168.1.22/24 | 192.168.8.11/24 | keepalived haproxy ipvsadm containerd kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 |
kube-master3 | 192.168.1.23/24 | 192.168.8.12/24 | keepalived haproxy ipvsadm containerd kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 |
kube-node1 | 192.168.1.24/24 | 192.168.8.13/24 | ipvsadm containerd kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 |
kube-node2 | 192.168.1.25/24 | 192.168.8.14/24 | ipvsadm containerd kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 |
vip | - | 192.168.8.100/24 | keepalived virtual IP |
3. Set the hostnames
[root@localhost ~]# hostnamectl set-hostname kube-master1 ### set the hostname on each of the five hosts in turn
4. Configure the service IPs
CentOS 8 uses NetworkManager by default, and editing the network scripts directly is not recommended; set the IP with nmcli or nmtui, which write the change into the connection profile.
[root@kube-master1 ~]# nmcli connection modify ens18 +ipv4.addresses 192.168.8.10/24 ### set the service IP on each of the five hosts in turn
[root@kube-master1 ~]# nmcli connection up ens18
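To confirm that both addresses are now active on the interface (assuming it is named ens18 as above), a quick check:
ip -4 addr show ens18 ### should list both the management and the service address
nmcli -g ipv4.addresses connection show ens18 ### shows the addresses stored in the connection profile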
5. Set up an SSH key so kube-master1 can log in to the other nodes without a password
[root@kube-master1 ~]# ssh-keygen # generate the SSH key pair on kube-master1
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:h/PAw3UFBEgHiFLnoWYTTAepj9L5iA/bU2wudYlAI5k root@kube-master1
The key's randomart image is:
+---[RSA 3072]----+
| o +=++oooooo.. |
|E + +*.... . |
| o += . . . |
| oo . o o . |
| . * . .S . |
|. + B o * |
|.o B . . |
|.++ o |
|..oo |
+----[SHA256]-----+
[root@kube-master1 ~]# for i in {1..5};do ssh-copy-id root@192.168.1.2$i;done ### copy the SSH public key to every node
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '192.168.1.21 (192.168.1.21)' can't be established.
ECDSA key fingerprint is SHA256:3u7rfasPzAiIdCfM2HG2Xs+gY49KmBCxyKSo2oBPH6I.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@192.168.1.21's password:
................................. remaining terminal output omitted .................................
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.1.25'"
and check to make sure that only the key(s) you wanted were added.
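With the key distributed, a quick check that passwordless login works against every node:
for i in {1..5};do ssh root@192.168.1.2$i hostname;done ### should print all five hostnames without asking for a password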
6. Configure local name resolution (/etc/hosts)
[root@kube-master1 ~]# cat >> /etc/hosts <<EOF
> 192.168.8.100 kube-vip
> 192.168.8.10 kube-master1
> 192.168.8.11 kube-master2
> 192.168.8.12 kube-master3
> 192.168.8.13 kube-node1
> 192.168.8.14 kube-node2
> EOF
[root@kube-master1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.8.100 kube-vip
192.168.8.10 kube-master1
192.168.8.11 kube-master2
192.168.8.12 kube-master3
192.168.8.13 kube-node1
192.168.8.14 kube-node2
[root@kube-master1 ~]# for i in {1..5};do scp -r /etc/hosts 192.168.1.2$i:/etc/hosts;done ### copy the hosts file to the other nodes
hosts 100% 308 6.2KB/s 00:00
................................. remaining terminal output omitted .................................
7. Disable firewalld, SELinux, and swap
[root@kube-master1 ~]# for i in {1..5};do ssh root@192.168.1.2$i systemctl disable --now firewalld;done ### disable firewalld on every node; you can leave it running, but then a long list of ports has to be opened
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
................................. remaining terminal output omitted .................................
[root@kube-master1 ~]# for i in {1..5};do ssh root@192.168.1.2$i sed -i s/SELINUX=enforcing/SELINUX=disabled/ /etc/selinux/config;done ### disable SELinux persistently on every node by editing its config file
[root@kube-master1 ~]# for i in {1..5};do ssh root@192.168.1.2$i setenforce 0;done ### disable SELinux immediately (until the next reboot) on every node
[root@kube-master1 ~]# for i in {1..5} ;do ssh root@192.168.1.2$i sed -i /swap/d /etc/fstab;done ### remove the swap entry from /etc/fstab so it is not mounted at boot
[root@kube-master1 ~]# for i in {1..5} ;do ssh root@192.168.1.2$i swapoff -a ;done ### turn swap off immediately
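An optional check that SELinux and swap are really off on every node (getenforce should report Permissive now and Disabled after a reboot; the Swap line should show 0):
for i in {1..5};do ssh root@192.168.1.2$i "getenforce; free -m | grep -i swap";done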
8. Install keepalived, ipvsadm, and haproxy
A local CentOS yum repository is available, so these three packages are installed from it. The three master nodes need keepalived, haproxy, and ipvsadm; the two worker nodes only need ipvsadm.
[root@kube-master1 ~]# for i in {1..3} ;do ssh root@192.168.1.2$i yum install keepalived haproxy ipvsadm -y ;done
[root@kube-master1 ~]# vim /etc/keepalived/keepalived.conf ### edit the keepalived configuration file
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict ### comment out or delete vrrp_strict; leaving it enabled can make the VIP unreachable from other hosts
vrrp_garp_interval 0
vrrp_gna_interval 0
}
########################## the section above can be left unchanged ##########################
vrrp_instance VI_1 {
state MASTER ### kube-master1 is MASTER; set the other two kube-master nodes to BACKUP
interface ens18 ### change this to the host's network interface
virtual_router_id 51 ### the virtual router id must be identical on all three masters
priority 100 ### the other two masters must use lower priorities, e.g. 99 and 98
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.8.100/24 ### the kube-vip address
}
}
########################## everything below this line can be deleted ##########################
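The configuration on kube-master2 and kube-master3 differs only in state and priority, so instead of editing it by hand on each node you can push the file from kube-master1 and patch it remotely; a minimal sketch, assuming the file above is final on kube-master1:
for i in 22 23;do scp /etc/keepalived/keepalived.conf 192.168.1.$i:/etc/keepalived/keepalived.conf;done
ssh 192.168.1.22 "sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 100/priority 99/' /etc/keepalived/keepalived.conf"
ssh 192.168.1.23 "sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 100/priority 98/' /etc/keepalived/keepalived.conf"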
[root@kube-master1 ~]# systemctl enable --now keepalived.service ### enable keepalived to start at boot on all three kube-master nodes
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@kube-master1 ~]# ip a |grep inet ### the kube-vip is now on the MASTER node; shut this node down and you will see kube-vip (192.168.8.100/24) fail over to the node with priority 99
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
inet 192.168.1.21/24 brd 192.168.1.255 scope global noprefixroute ens18
inet 192.168.8.10/24 brd 192.168.8.255 scope global noprefixroute ens18
inet 192.168.8.100/24 scope global secondary ens18
[root@kube-master1 ~]# vim /etc/haproxy/haproxy.cfg ### edit the haproxy configuration file; simply append the following at the end
########################## stats web interface ##########################
listen admin_status
bind *:9000 ### port for the stats web interface
mode http ### protocol
stats enable ### enable the statistics page
stats refresh 2s ### refresh the statistics every 2 seconds
stats realm Haproxy\ status
stats uri /admin?status ### URI of the stats page; e.g. with master1's management IP 192.168.1.21, open http://192.168.1.21:9000/admin?status in a browser
stats auth admin:admin ### username and password for the stats page
########################## frontend listening on port 7443 ##########################
frontend kubernetes ### the entry point: traffic arriving on port 7443 is handed to the backend named by use_backend and load-balanced across the masters
bind *:7443
mode tcp
use_backend kubernetes-server
########################## backend load balancing ##########################
backend kubernetes-server
mode tcp
balance roundrobin
server kube-master1 192.168.8.10:6443 check
server kube-master2 192.168.8.11:6443 check
server kube-master3 192.168.8.12:6443 check
####################################################################
# copy this configuration file to all three master nodes and enable haproxy at boot (a sketch follows below) #
####################################################################
[root@kube-master1 ~]# systemctl enable --now haproxy.service
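The same haproxy.cfg is used unchanged on all three masters, so it can simply be pushed from kube-master1 and the service enabled remotely; a minimal sketch:
for i in 22 23;do scp /etc/haproxy/haproxy.cfg 192.168.1.$i:/etc/haproxy/haproxy.cfg;done
for i in 22 23;do ssh 192.168.1.$i systemctl enable --now haproxy.service;done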
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i touch /etc/sysconfig/ipvsadm ;done ### create an empty ipvsadm rules file; without it the ipvsadm service fails to start
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i systemctl enable --now ipvsadm ;done ### enable ipvsadm at boot on all nodes
Created symlink /etc/systemd/system/multi-user.target.wants/ipvsadm.service → /usr/lib/systemd/system/ipvsadm.service.
................................. remaining terminal output omitted .................................
9. Install containerd
containerd is used as the container runtime. It could be installed with yum (the Docker yum repository also provides containerd), but here it is installed from the release tarball.
containerd download page: https://github.com/containerd/containerd/releases/tag/v1.5.6
Two main archives are published on GitHub; for use with Kubernetes choose cri-containerd-cni-1.5.6-linux-amd64.tar.gz:
containerd-1.5.6-linux-amd64.tar.gz
cri-containerd-cni-1.5.6-linux-amd64.tar.gz
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i yum install tar -y ;done ### the CentOS 8.2 minimal install does not ship tar, so install it first
[root@kube-master1 ~]# for i in {1..5};do scp cri-containerd-cni-1.5.6-linux-amd64.tar.gz 192.168.1.2$i:/root/cri-containerd-cni-1.5.6-linux-amd64.tar.gz ;done ### copy the archive to every node
cri-containerd-cni-1.5.6-linux-amd64.tar.gz 100% 121MB 41.4MB/s 00:02
cri-containerd-cni-1.5.6-linux-amd64.tar.gz 100% 121MB 20.2MB/s 00:05
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i tar -xvf /root/cri-containerd-cni-1.5.6-linux-amd64.tar.gz -C / ;done ### extract the archive directly into / with -C
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i "mkdir -p /etc/containerd/ && containerd config default > /etc/containerd/config.toml" ;done ### generate the default config on every node; quote the remote command so the && and the redirection run on the remote host rather than locally
[root@kube-master1 ~]# vim /etc/containerd/config.toml ### edit the configuration file on every node (a non-interactive alternative is sketched after this excerpt)
[plugins."io.containerd.grpc.v1.cri"]
......................... part of the file omitted .........................
sandbox_image = "k8s.gcr.io/pause:3.4.1" ### the default is k8s.gcr.io/pause:3.5; change it to 3.4.1 to match the images imported below
......................... part of the file omitted .........................
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
......................... part of the file omitted .........................
SystemdCgroup = true ### change the default false to true
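Rather than repeating the vim edit on every node, the same two changes can be applied remotely with sed; a minimal sketch, assuming the stock containerd 1.5.6 defaults (sandbox_image pause:3.5 and SystemdCgroup = false):
for i in {1..5};do ssh 192.168.1.2$i "sed -i -e 's#k8s.gcr.io/pause:3.5#k8s.gcr.io/pause:3.4.1#' -e 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml";done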
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i systemctl enable --now containerd ;done ### enable containerd to start at boot on every node
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.
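An optional check that containerd is running and answering over its socket on every node:
for i in {1..5};do ssh 192.168.1.2$i "systemctl is-active containerd && ctr version";done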
10. Adjust kernel parameters on all nodes
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
##############################################
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
##############################################
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
##############################################
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
##############################################
sysctl --system
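The files above are shown being created once, but they are needed on every node. If they were only created on kube-master1, a sketch of pushing them to the rest and applying the settings everywhere:
for i in {2..5};do scp /etc/modules-load.d/k8s.conf /etc/modules-load.d/containerd.conf 192.168.1.2$i:/etc/modules-load.d/;done
for i in {2..5};do scp /etc/sysctl.d/k8s.conf /etc/sysctl.d/99-kubernetes-cri.conf 192.168.1.2$i:/etc/sysctl.d/;done
for i in {1..5};do ssh 192.168.1.2$i "modprobe overlay; modprobe br_netfilter; sysctl --system";done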
11. Configure the Aliyun Kubernetes yum repository
[root@kube-master1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo ### create the Kubernetes yum repo file
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=0
> #repo_gpgcheck=1
> #gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg #https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@kube-master1 ~]# for i in {2..5};do scp /etc/yum.repos.d/kubernetes.repo root@192.168.1.2$i:/etc/yum.repos.d/kubernetes.repo;done ### copy the repo file to the other nodes
kubernetes.repo 100% 276 133.0KB/s 00:00
................................. remaining terminal output omitted .................................
[root@kube-master1]# for i in {1..5};do ssh root@192.168.1.2$i yum install kubeadm-1.21.5 kubelet-1.21.5 -y;done ### install kubeadm and kubelet; kubectl is pulled in automatically as a dependency, but note that it resolves to the newest version (kubectl-1.22.2 in the output below), so pin it explicitly if the versions should match (see the sketch after the output)
................................. remaining terminal output omitted .................................
Installed:
conntrack-tools-1.4.4-10.el8.x86_64
cri-tools-1.13.0-0.x86_64
kubeadm-1.21.5-0.x86_64
kubectl-1.22.2-0.x86_64
kubelet-1.21.5-0.x86_64
kubernetes-cni-0.8.7-0.x86_64
libnetfilter_cthelper-1.0.0-15.el8.x86_64
libnetfilter_cttimeout-1.0.0-11.el8.x86_64
libnetfilter_queue-1.0.2-11.el8.x86_64
socat-1.7.3.3-2.el8.x86_64
Complete!
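Note that kubectl resolved to 1.22.2 rather than 1.21.5 in the output above. If kubectl should match the cluster version, as listed in the host plan, one option is to name it explicitly with a pinned version in the install command instead:
for i in {1..5};do ssh root@192.168.1.2$i yum install kubeadm-1.21.5 kubelet-1.21.5 kubectl-1.21.5 -y;done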
[root@kube-master1 ~]# for i in {1..5};do ssh root@192.168.1.2$i systemctl enable --now kubelet;done ### enable kubelet at boot on all nodes
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
12. Import the Kubernetes images
You can either point containerd at a mirror registry in its configuration or import the images manually; this guide imports them manually.
[root@kube-master1 ~]# kubeadm config images list ### list the images required by this Kubernetes version
I1008 05:16:33.139137 8468 version.go:254] remote version is much newer: v1.22.2; falling back to: stable-1.21
k8s.gcr.io/kube-apiserver:v1.21.5
k8s.gcr.io/kube-controller-manager:v1.21.5
k8s.gcr.io/kube-scheduler:v1.21.5
k8s.gcr.io/kube-proxy:v1.21.5
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
[root@kube-master1 ~]# kubeadm config images pull ### if a registry mirror is configured for containerd, the images can be pulled directly with this command
[root@kube-master1 ~]# for i in {1..5};do ssh 192.168.1.2$i ctr ns c k8s.io ;done ### since the images are imported manually, first create the k8s.io namespace on every node
[root@kube-master1 ~]# ctr ns ls
NAME LABELS
k8s.io
[root@kube-master1 img]# ls
coredns_v1.8.0.tar kube-apiserver_v1.21.5.tar kube-proxy_v1.21.5.tar pause_3.4.1.tar
etcd_3.4.13-0.tar kube-controller-manager_v1.21.5.tar kube-scheduler_v1.21.5.tar import-images.sh
###### import-images.sh, shown below, imports the images: the master nodes import all of them, the worker nodes only need pause and kube-proxy
### the tarballs were pulled with docker from registry.cn-hangzhou.aliyuncs.com/google_containers/ (a sketch of that step follows at the end of this section)
mirrors=registry.cn-hangzhou.aliyuncs.com/google_containers/
ctr -n k8s.io image import coredns_v1.8.0.tar
ctr -n k8s.io image import kube-apiserver_v1.21.5.tar
ctr -n k8s.io image import kube-controller-manager_v1.21.5.tar
ctr -n k8s.io image import kube-scheduler_v1.21.5.tar
ctr -n k8s.io image import kube-proxy_v1.21.5.tar
ctr -n k8s.io image import pause_3.4.1.tar
ctr -n k8s.io image import etcd_3.4.13-0.tar
####################################################################################################
ctr -n k8s.io images tag ${mirrors}coredns:v1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0
ctr -n k8s.io images tag ${mirrors}etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
ctr -n k8s.io images tag ${mirrors}kube-apiserver:v1.21.5 k8s.gcr.io/kube-apiserver:v1.21.5
ctr -n k8s.io images tag ${mirrors}kube-controller-manager:v1.21.5 k8s.gcr.io/kube-controller-manager:v1.21.5
ctr -n k8s.io images tag ${mirrors}kube-proxy:v1.21.5 k8s.gcr.io/kube-proxy:v1.21.5
ctr -n k8s.io images tag ${mirrors}kube-scheduler:v1.21.5 k8s.gcr.io/kube-scheduler:v1.21.5
ctr -n k8s.io images tag ${mirrors}pause:3.4.1 k8s.gcr.io/pause:3.4.1
################################################################################################################
ctr -n k8s.io images rm ${mirrors}coredns:v1.8.0
ctr -n k8s.io images rm ${mirrors}etcd:3.4.13-0
ctr -n k8s.io images rm ${mirrors}kube-apiserver:v1.21.5
ctr -n k8s.io images rm ${mirrors}kube-controller-manager:v1.21.5
ctr -n k8s.io images rm ${mirrors}kube-proxy:v1.21.5
ctr -n k8s.io images rm ${mirrors}kube-scheduler:v1.21.5
ctr -n k8s.io images rm ${mirrors}pause:3.4.1
crictl images
################################################################################################################
[root@kube-master1 img]# crictl images
IMAGE TAG IMAGE ID SIZE
k8s.gcr.io/coredns/coredns v1.8.0 296a6d5035e2d 42.6MB
k8s.gcr.io/etcd 3.4.13-0 0369cf4303ffd 255MB
k8s.gcr.io/kube-apiserver v1.21.5 7b2ac941d4c30 127MB
k8s.gcr.io/kube-controller-manager v1.21.5 184ef4d127b40 121MB
k8s.gcr.io/kube-proxy v1.21.5 e08abd2be7302 105MB
k8s.gcr.io/kube-scheduler v1.21.5 8e60ea3644d6d 52.1MB
k8s.gcr.io/pause 3.4.1 0f8457a4c2eca 686kB
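For reference, a sketch of how tarballs like these can be produced on a machine that has docker and internet access, using the Aliyun mirror mentioned above; only two images are shown, the rest follow the same pattern:
mirrors=registry.cn-hangzhou.aliyuncs.com/google_containers
docker pull ${mirrors}/kube-apiserver:v1.21.5
docker save -o kube-apiserver_v1.21.5.tar ${mirrors}/kube-apiserver:v1.21.5
docker pull ${mirrors}/pause:3.4.1
docker save -o pause_3.4.1.tar ${mirrors}/pause:3.4.1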
13. Initialize the cluster
[root@kube-master1 ~]# kubeadm init --control-plane-endpoint 192.168.8.100:7443 --cri-socket unix:///run/containerd/containerd.sock --service-cidr 10.10.0.0/16 --pod-network-cidr 10.20.0.0/16 --kubernetes-version 1.21.5 --upload-certs
[init] Using Kubernetes version: v1.21.5
[preflight] Running pre-flight checks
.............................................. part of the output omitted ..............................................
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
############################## run the following three commands on the primary master, kube-master1 ###########################
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
############################## run the following command on the remaining masters, kube-master2 and kube-master3 ###########################
kubeadm join 192.168.8.100:7443 --token 5zm8ck.tssy1qxcw7npaw5a \
--discovery-token-ca-cert-hash sha256:88ad10a9bea524e56837ca9d960ddde0108d60ce2a1267aa891e81bc8b7107c4 \
--control-plane --certificate-key 53826137e0f39e4302119776b6104a938ff8a0712934f767792686e4c4191ab3
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
############################## run the following command on the two worker nodes, kube-node1 and kube-node2 ###########################
kubeadm join 192.168.8.100:7443 --token 5zm8ck.tssy1qxcw7npaw5a \
--discovery-token-ca-cert-hash sha256:88ad10a9bea524e56837ca9d960ddde0108d60ce2a1267aa891e81bc8b7107c4
[root@kube-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kube-master1 NotReady control-plane,master 21m v1.21.5
kube-master2 Ready control-plane,master 15m v1.21.5
kube-master3 Ready control-plane,master 7m38s v1.21.5
kube-node1 Ready <none> 8m21s v1.21.5
kube-node2 Ready <none> 9m1s v1.21.5
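To confirm that the HA entry point is working end to end, the apiserver can be queried through the VIP and the haproxy 7443 frontend; a quick check (the /version endpoint is anonymously readable on a default kubeadm cluster):
curl -k https://192.168.8.100:7443/version ### should return the apiserver version JSON via haproxy and one of the masters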
14. Install the flannel network plugin
Download the flannel manifest: wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
net-conf.json: |
{
"Network": "10.20.0.0/16", ### 修改配置文件中此字段IP,修改为初始化集群时--pod-network-cidr 10.20.0.0/16的地址
"Backend": {
"Type": "vxlan"
}
}
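The upstream manifest ships with 10.244.0.0/16 as the default Network value; assuming that default, the edit can also be done non-interactively:
sed -i 's#10.244.0.0/16#10.20.0.0/16#' kube-flannel.yml ### replace the default pod CIDR with the one used at kubeadm init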
Upload the flannel manifest and the flannel image archive to kube-master1:
[root@kube-master1 flannel]# ls
flannel.tar kube-flannel.yml
[root@kube-master1 flannel]# for i in {2..5};do scp -r flannel.tar 192.168.1.2$i:/root/;done ### copy the flannel image archive to the other nodes
flannel.tar 100% 66MB 26.1MB/s 00:02
flannel.tar 100% 66MB 27.3MB/s 00:02
flannel.tar 100% 66MB 22.2MB/s 00:02
flannel.tar 100% 66MB 21.4MB/s 00:03
[root@kube-master1 flannel]# for i in {1..5};do ssh root@192.168.1.2$i ctr -n k8s.io image import flannel.tar;done ### import the flannel image on every node
unpacking quay.io/coreos/flannel:v0.14.0 (sha256:c933d330f7e737a715a4bbbf4e2378f5c386547b6e52a102f9dbf8060b4d05d3)...done
unpacking quay.io/coreos/flannel:v0.14.0 (sha256:c933d330f7e737a715a4bbbf4e2378f5c386547b6e52a102f9dbf8060b4d05d3)...done
unpacking quay.io/coreos/flannel:v0.14.0 (sha256:c933d330f7e737a715a4bbbf4e2378f5c386547b6e52a102f9dbf8060b4d05d3)...done
unpacking quay.io/coreos/flannel:v0.14.0 (sha256:c933d330f7e737a715a4bbbf4e2378f5c386547b6e52a102f9dbf8060b4d05d3)...done
[root@kube-master1 flannel]# kubectl create -f kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@kube-master1 flannel]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-558bd4d5db-kv5rx 1/1 Running 0 72m
coredns-558bd4d5db-z9sbk 1/1 Running 0 72m
etcd-kube-master1 1/1 Running 0 73m
etcd-kube-master2 1/1 Running 0 67m
etcd-kube-master3 1/1 Running 0 59m
kube-apiserver-kube-master1 1/1 Running 0 72m
kube-apiserver-kube-master2 1/1 Running 0 67m
kube-apiserver-kube-master3 1/1 Running 0 59m
kube-controller-manager-kube-master1 1/1 Running 1 72m
kube-controller-manager-kube-master2 1/1 Running 0 67m
kube-controller-manager-kube-master3 1/1 Running 0 59m
kube-flannel-ds-8nt8t 1/1 Running 0 119s
kube-flannel-ds-cfr22 1/1 Running 0 119s
kube-flannel-ds-ltz7j 1/1 Running 0 2m
kube-flannel-ds-rk9nw 1/1 Running 0 119s
kube-flannel-ds-z87b8 1/1 Running 0 119s
kube-proxy-7qnsj 1/1 Running 0 72m
kube-proxy-k95xw 1/1 Running 0 59m
kube-proxy-lx9rv 1/1 Running 0 67m
kube-proxy-m4b86 1/1 Running 0 60m
kube-proxy-z864h 1/1 Running 0 59m
kube-scheduler-kube-master1 1/1 Running 1 72m
kube-scheduler-kube-master2 1/1 Running 0 67m
kube-scheduler-kube-master3 1/1 Running 0 59m
############################################ once every pod is in the Running state the deployment is complete ##################################
15. Notes:
- Time synchronization was not configured in the steps above; for any real deployment make sure the clocks on all nodes are kept in sync (for example with chrony).
- keepalived should be paired with a health check: if haproxy dies on kube-master1 while the VIP is still held there, traffic can no longer be balanced to the other masters. The check should detect the failed haproxy and lower that node's priority (or stop keepalived) so the VIP fails over to another master; a minimal sketch follows after this list.
- 。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
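A minimal sketch of such a check, assuming haproxy runs as the systemd service configured above (the script path, interval, and weight are illustrative choices): keepalived runs the script periodically and lowers this node's priority when haproxy is down, so the VIP moves to a healthy master.
cat > /etc/keepalived/check_haproxy.sh <<'EOF'
#!/bin/bash
### exit non-zero when haproxy is not running; keepalived treats that as a failed check
systemctl is-active --quiet haproxy
EOF
chmod +x /etc/keepalived/check_haproxy.sh
### additions to /etc/keepalived/keepalived.conf on all three masters:
### at global scope:
###   vrrp_script chk_haproxy {
###       script "/etc/keepalived/check_haproxy.sh"
###       interval 2
###       weight -20 ### 100-20=80 drops below the BACKUP priorities 99/98, so the VIP fails over
###   }
### inside vrrp_instance VI_1:
###   track_script {
###       chk_haproxy
###   }
systemctl restart keepalived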