Tags (space-separated): kubernetes series


1: System Environment

1.1 System overview

OS: Rocky Linux 9.2 x86_64

cat /etc/hosts
---------
172.16.10.61    flyfish61
172.16.10.62    flyfish62
172.16.10.63    flyfish63
172.16.10.64    flyfish64
172.16.10.65    flyfish65


---------
  Disable SELinux and firewalld on every node and flush any iptables rules.
  Set up passwordless SSH authentication between all nodes.

kubeadm HA deployment architecture (stacked etcd on the control-plane nodes)



1.2 Download preparation

1. Download the Kubernetes 1.27.x binary package
GitHub changelog with download links: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md
 
wget https://dl.k8s.io/v1.27.2/kubernetes-server-linux-amd64.tar.gz
 
2. Download the etcdctl binary package
GitHub releases: https://github.com/etcd-io/etcd/releases
 
wget https://github.com/etcd-io/etcd/releases/download/v3.5.5/etcd-v3.5.5-linux-amd64.tar.gz
 
3. Download the docker-ce static binary package
Download page: https://download.docker.com/linux/static/stable/x86_64/
 
Use a 20.10.x build here.
 
wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.24.tgz

4. Download cri-dockerd
GitHub releases: https://github.com/Mirantis/cri-dockerd/releases/
 
wget https://ghproxy.com/https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz


5. Download the containerd binary package
GitHub releases: https://github.com/containerd/containerd/releases
 
Download the containerd bundle that includes the CNI plugins.
 
wget https://github.com/containerd/containerd/releases/download/v1.6.6/cri-containerd-cni-1.6.6-linux-amd64.tar.gz

6. Download the cfssl binaries
GitHub releases: https://github.com/cloudflare/cfssl/releases
 
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
 
7. Download the CNI plugins
GitHub releases: https://github.com/containernetworking/plugins/releases
 
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
 
8. Download the crictl client binary
GitHub releases: https://github.com/kubernetes-sigs/cri-tools/releases
 
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz

1.3 System initialization

# Install dependency packages
yum install -y wget jq psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl
# Disable firewalld and SELinux
systemctl disable --now firewalld 
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0
 
cat /etc/fstab
# /dev/mapper/centos-swap swap                    swap    defaults        0 0


# Raise the system file-handle limits
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

# Set up passwordless SSH trust between the nodes
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="172.16.10.61 172.16.10.62 172.16.10.63 172.16.10.64 172.16.10.65"
export SSHPASS=flyfish225
for HOST in $IP;do
     sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
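
Before continuing, a quick loop can confirm the passwordless trust works (a small sketch reusing the IP list above; BatchMode makes ssh fail instead of prompting for a password):

for HOST in $IP; do
    ssh -o BatchMode=yes $HOST hostname
done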

# Upgrade the kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install https://www.elrepo.org/elrepo-release-9.el9.elrepo.noarch.rpm
# Switch to the Aliyun mirror
mv /etc/yum.repos.d/elrepo.repo /etc/yum.repos.d/elrepo.repo.bak 
vim /etc/yum.repos.d/elrepo.repo 
----
[elrepo-kernel]
name=elrepoyum
baseurl=https://mirrors.aliyun.com/elrepo/kernel/el9/x86_64/
enabled=1
gpgcheck=0
----
yum --enablerepo=elrepo-kernel install kernel-lt

# Use the kernel at menu index 0 (index 0 is the kernel entry found in the previous step)
grub2-set-default 0

# Regenerate the grub config and reboot
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
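
After the reboot, confirm the node came up on the new kernel (grubby ships with Rocky Linux):

uname -r
grubby --default-kernel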



Enable IPVS

yum install ipvsadm ipset sysstat conntrack libseccomp -y
mkdir -p /etc/modules-load.d/
cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
 
systemctl restart systemd-modules-load.service
 
lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 180224  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  1 ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs


1.4 Tune kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF

modprobe br_netfilter
lsmod |grep conntrack
modprobe nf_conntrack

sysctl -p /etc/sysctl.d/k8s.conf
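
A quick spot-check that the key values took effect (br_netfilter must already be loaded for the bridge keys to exist):

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.netfilter.nf_conntrack_max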


1.5 Install Docker on all nodes

Download: https://download.docker.com/linux/static/stable/x86_64/docker-20.10.24.tgz

Run the following on every node. A binary install is shown here; installing with yum works just as well. Docker must be installed on all nodes.


# Binary download page: https://download.docker.com/linux/static/stable/x86_64/
# wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.24.tgz

 
# Unpack
tar xf docker-*.tgz 
# Copy the binaries
cp docker/* /usr/bin/
# Create containerd's service unit and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
# Prepare Docker's service unit
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
# Prepare Docker's socket unit
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Create the docker group
groupadd docker
# Start Docker
systemctl enable --now docker.socket  && systemctl enable --now docker.service
# Verify
docker info
# Write the daemon config (the binary install does not create /etc/docker, so create it first)
mkdir -p /etc/docker
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
    },
  "data-root": "/var/lib/docker"
}
EOF
systemctl restart docker
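
Confirm Docker picked up the systemd cgroup driver and data root from daemon.json (kubelet is configured for the systemd driver later, so these must match):

docker info | grep -iE 'cgroup driver|docker root dir'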


Install cri-dockerd

# Kubernetes 1.24 and later no longer support Docker directly (dockershim was removed), so cri-dockerd is installed
# Download cri-dockerd
# wget  https://ghproxy.com/https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz
 
# Unpack cri-dockerd
tar -zxvf cri-dockerd-0.3.1.amd64.tgz
cp cri-dockerd/cri-dockerd  /usr/bin/
chmod +x /usr/bin/cri-dockerd
 
# Write the service unit
cat >  /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
 
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
 
StartLimitBurst=3
 
StartLimitInterval=60s
 
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
 
TasksMax=infinity
Delegate=yes
KillMode=process
 
[Install]
WantedBy=multi-user.target
EOF
 
# Write the socket unit
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
 
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
 
[Install]
WantedBy=sockets.target
EOF
 
# Reload systemd and start cri-docker
systemctl daemon-reload ; systemctl enable cri-docker --now
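
Verify that the CRI socket kubeadm will point at is live:

systemctl is-active cri-docker.socket cri-docker.service
ls -l /var/run/cri-dockerd.sock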


3: Load balancing and high availability

3.1 Install keepalived and haproxy

3.1.1 Configure keepalived
On flyfish61/flyfish62/flyfish63:
  1. First install the load balancer components, haproxy plus keepalived:
   yum install -y keepalived haproxy


cd /etc/keepalived/
cp -p keepalived.conf keepalived.conf.bak


vim /etc/keepalived/keepalived.conf
----
flyfish61:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    mcast_src_ip 172.16.10.61
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.10.200
    }
    track_script {
        chk_apiserver
    }
}

----


service keepalived start 
systemctl enable keepalived
cd /etc/keepalived/
scp keepalived.conf root@flyfish62:/etc/keepalived/

Log in to flyfish62:

flyfish62:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    mcast_src_ip 172.16.10.62
    virtual_router_id 51
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.10.200
    }
    track_script {
        chk_apiserver
    }
}

----

service keepalived start 
systemctl enable keepalived


cd /etc/keepalived/
scp keepalived.conf root@flyfish63:/etc/keepalived/

Log in to flyfish63:

flyfish63:

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    mcast_src_ip 172.16.10.63
    virtual_router_id 51
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.10.200
    }
    track_script {
        chk_apiserver
    }
}

----

service keepalived start 
systemctl enable keepalived
Health-check script (the same on all three nodes):

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
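
keepalived executes this script as root (enable_script_security), so it must be executable, and it must exist on all three nodes:

chmod +x /etc/keepalived/check_apiserver.sh
scp /etc/keepalived/check_apiserver.sh root@flyfish62:/etc/keepalived/
scp /etc/keepalived/check_apiserver.sh root@flyfish63:/etc/keepalived/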



Test keepalived:

ping 172.16.10.200

Stop keepalived on flyfish61 and the VIP fails over to flyfish62/flyfish63; start keepalived on flyfish61 again and the VIP automatically switches back.
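
A concrete test sequence (stop/start on flyfish61, address check on the backups):

systemctl stop keepalived                  # on flyfish61
ip addr show ens33 | grep 172.16.10.200    # on flyfish62/flyfish63: the VIP appears on one of them
systemctl start keepalived                 # on flyfish61: the VIP moves back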


3.1.2 Configure haproxy

Edit /etc/sysctl.conf so haproxy can bind the VIP even on nodes where it is not currently local:

vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind=1
---
sysctl -p

cd /etc/haproxy/
mv haproxy.cfg haproxy.cfg.bak 
cat > /etc/haproxy/haproxy.cfg << EOF
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 172.16.10.200:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-masters

backend k8s-masters
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server flyfish61 172.16.10.61:6443 check
  server flyfish62 172.16.10.62:6443 check
  server flyfish63 172.16.10.63:6443 check
EOF
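
haproxy can validate the file before the service is started:

haproxy -c -f /etc/haproxy/haproxy.cfg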

service haproxy start 
systemctl enable haproxy 


scp /etc/haproxy/haproxy.cfg root@flyfish62:/etc/haproxy/

---
On flyfish62, apply the same /etc/sysctl.conf change:

sysctl -a | grep local

vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind = 1
---
sysctl -p 

service haproxy start 
systemctl enable haproxy


scp /etc/haproxy/haproxy.cfg root@flyfish63:/etc/haproxy/

---
On flyfish63, apply the same /etc/sysctl.conf change:

sysctl -a | grep local

vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind = 1
---
sysctl -p 

service haproxy start 
systemctl enable haproxy



4: Install Kubernetes

4.1 Configure the yum repository

# cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache 



4.2 Install Kubernetes 1.27.3

Installing without a version gets the latest:
# yum -y install  kubeadm  kubelet kubectl


List the available versions:
# yum list kubeadm.x86_64 --showduplicates | sort -r
# yum list kubelet.x86_64 --showduplicates | sort -r
# yum list kubectl.x86_64 --showduplicates | sort -r

Or install a specific version:
# yum -y install  kubeadm-1.27.X-0  kubelet-1.27.X-0 kubectl-1.27.X-0

4.3 Configure kubelet

To keep the cgroup driver used by kubelet consistent with the one Docker uses (systemd, set in daemon.json above), modify the following file:

# vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

Enable kubelet to start at boot. It will not run yet because no configuration has been generated; it starts automatically after cluster initialization.
# systemctl enable kubelet


4.4 Prepare Docker images

List the images required by this release:

kubeadm config images list --kubernetes-version=v1.27.3
---
registry.k8s.io/kube-apiserver:v1.27.3
registry.k8s.io/kube-controller-manager:v1.27.3
registry.k8s.io/kube-scheduler:v1.27.3
registry.k8s.io/kube-proxy:v1.27.3
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.7-0
registry.k8s.io/coredns/coredns:v1.10.1

---

docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.27.3
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.27.3
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.27.3
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.27.3
docker pull registry.aliyuncs.com/google_containers/pause:3.9
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.7-0
docker pull registry.aliyuncs.com/google_containers/coredns:v1.10.1
docker tag registry.aliyuncs.com/google_containers/coredns:v1.10.1 registry.aliyuncs.com/google_containers/coredns/coredns:v1.10.1

# cat image_download.sh
#!/bin/bash
images_list='
<image list from above>'

for i in $images_list
do
        docker pull $i
done

docker save -o k8s-1-27-X.tar $images_list
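
With the Aliyun image names from above filled in, usage looks like this (a sketch; the list mirrors the pull commands earlier, and the archive is then loaded on every other node):

images_list='
registry.aliyuncs.com/google_containers/kube-apiserver:v1.27.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.27.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.27.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.27.3
registry.aliyuncs.com/google_containers/pause:3.9
registry.aliyuncs.com/google_containers/etcd:3.5.7-0
registry.aliyuncs.com/google_containers/coredns:v1.10.1'

bash image_download.sh
# on the other nodes:
docker load -i k8s-1-27-X.tar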


4.5 Initialize the cluster
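
The init flags below can equivalently be kept in a kubeadm config file. A minimal sketch, assuming the kubeadm.k8s.io/v1beta3 API that ships with 1.27 (the flag form is what was actually run here):

cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.27.3
controlPlaneEndpoint: 172.16.10.200:16443
imageRepository: registry.aliyuncs.com/google_containers
networking:
  serviceSubnet: 10.10.0.0/16
  podSubnet: 10.244.0.0/16
EOF
# kubeadm init --config kubeadm-config.yaml

The flag invocation used in this walkthrough: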

kubeadm init --control-plane-endpoint=172.16.10.200:16443 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.27.3 --service-cidr=10.10.0.0/16 --pod-network-cidr=10.244.0.0/16 --cri-socket unix:///var/run/cri-dockerd.sock 

[init] Using Kubernetes version: v1.27.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [flyfish61 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.10.0.1 172.16.10.61 172.16.10.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [flyfish61 localhost] and IPs [172.16.10.61 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [flyfish61 localhost] and IPs [172.16.10.61 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
W0707 11:20:29.679293    4002 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
W0707 11:20:29.987728    4002 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
W0707 11:20:30.127087    4002 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
W0707 11:20:30.207521    4002 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.524847 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node flyfish61 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node flyfish61 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: pnqpc4.bj52losu5dqwywoz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
W0707 11:20:51.799589    4002 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 172.16.10.200:16443 --token pnqpc4.bj52losu5dqwywoz \
        --discovery-token-ca-cert-hash sha256:e5d70788b9af8c1f6ba6dcd8e6f863577dd9c2b93e953edee1e90bd4478edeb2 \
        --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.10.200:16443 --token pnqpc4.bj52losu5dqwywoz \
        --discovery-token-ca-cert-hash sha256:e5d70788b9af8c1f6ba6dcd8e6f863577dd9c2b93e953edee1e90bd4478edeb2



On the master node flyfish61, set up the admin kubeconfig for kubectl:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
  export KUBECONFIG=/etc/kubernetes/admin.conf


4.6 Join the other masters to the cluster

Sync the CA certificates and keys to the other masters, flyfish62 and flyfish63:

scp -r /etc/kubernetes/pki root@flyfish62:/etc/kubernetes/
scp -r /etc/kubernetes/pki root@flyfish63:/etc/kubernetes/
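
An alternative to copying pki/ by hand: kubeadm can upload the control-plane certificates into the cluster itself. Run on flyfish61 (this prints a certificate key, which the joining masters then pass via --certificate-key):

kubeadm init phase upload-certs --upload-certs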


The certificates that will be regenerated on join can be deleted; the procedure is identical on flyfish62 and flyfish63.
 cd /etc/kubernetes/pki/
rm -rf apiserver* 
rm -rf etcd/peer.*
rm -rf etcd/server.* 


Join flyfish62/flyfish63 to the cluster as control-plane nodes:
kubeadm join 172.16.10.200:16443 --token pnqpc4.bj52losu5dqwywoz \
        --discovery-token-ca-cert-hash sha256:e5d70788b9af8c1f6ba6dcd8e6f863577dd9c2b93e953edee1e90bd4478edeb2 \
        --control-plane --cri-socket unix:///var/run/cri-dockerd.sock 


mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


kubectl get node 


4.7 Join the worker nodes to the cluster

Join the workers, flyfish64/flyfish65:

kubeadm join 172.16.10.200:16443 --token pnqpc4.bj52losu5dqwywoz \
        --discovery-token-ca-cert-hash sha256:e5d70788b9af8c1f6ba6dcd8e6f863577dd9c2b93e953edee1e90bd4478edeb2 --cri-socket unix:///var/run/cri-dockerd.sock 
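
The bootstrap token from the init output expires after 24 hours. If it has expired, print a fresh join command on any master (append the --cri-socket flag as above before running it):

kubeadm token create --print-join-command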

  


kubectl get node 


5: Install the Calico network plugin

5.1 Install Calico

There are many network add-ons; deploying any one of them is enough. Calico is recommended.

Calico is a pure layer-3 data center networking solution that supports a wide range of platforms, including Kubernetes and OpenStack.

On each compute node, Calico uses the Linux kernel to implement an efficient virtual router (vRouter) for packet forwarding, and each vRouter advertises the routes of its local workloads across the Calico network via BGP.

Calico also implements the Kubernetes NetworkPolicy API, providing ACL functionality.

1. Download Calico:

wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml

Set CALICO_IPV4POOL_CIDR to the pod network CIDR passed to kubeadm, uncommenting it if necessary:

vim calico.yaml
...
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
...


Prepare the images:
  cat calico.yaml | grep image



docker pull docker.io/calico/cni:v3.25.0
docker pull docker.io/calico/node:v3.25.0
docker pull docker.io/calico/kube-controllers:v3.25.0
kubectl apply -f calico.yaml

kubectl get pod -n kube-system 


kubectl get node 


5.2 Remove the taint from the master nodes

# Check the taints
kubectl describe node flyfish61 | grep -i taint
kubectl describe node flyfish62 | grep -i taint
kubectl describe node flyfish63 | grep -i taint
---

Taints:             node-role.kubernetes.io/control-plane:NoSchedule

---
# Remove the taints
kubectl taint node flyfish61 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node flyfish62 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node flyfish63 node-role.kubernetes.io/control-plane:NoSchedule-
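
Confirm the taints are gone (each node should now report Taints: <none>):

for n in flyfish61 flyfish62 flyfish63; do
    kubectl describe node $n | grep -i taint
done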


6: Deploy metrics-server
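
The components.yaml manifest is not reproduced in this post; it can be fetched from the upstream metrics-server releases (assumed source). In kubeadm clusters the kubelet serving certificates are usually not signed by the cluster CA, so if kubectl top later fails with x509 errors, --kubelet-insecure-tls is commonly added to the metrics-server container args:

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# If needed, add to the Deployment's container args:
#     - --kubelet-insecure-tls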

 kubectl apply -f components.yaml


kubectl top node 
kubectl top pod -n kube-system 


7: Deploy the Dashboard

7.1 Deploy the dashboard

https://github.com/kubernetes/dashboard/releases/tag/v2.7.0
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
v2.7.0 is the latest release at the time of writing.

Expose the Service as a NodePort:

vim recommended.yaml

----
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
----


kubectl apply -f recommended.yaml


kubectl get pods -n kubernetes-dashboard
kubectl get pods,svc -n kubernetes-dashboard


Create an admin user:
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml

kubectl apply -f dashboard-user.yaml


Create a token:
kubectl -n kubernetes-dashboard create token admin-user
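
The token printed this way is short-lived by default; a longer-lived one can be minted with the --duration flag if needed:

kubectl -n kubernetes-dashboard create token admin-user --duration=24h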


7.2 Log in from a browser

https://172.16.10.61:30001
Enter the token:
----
eyJhbGciOiJSUzI1NiIsImtpZCI6Inc2c1FJVmhaa0hkbmlkSkYzWUg5ZHBHOHV2QmJFMEIteUpCcnZtZjQ3dVUifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjg4NzA4MjI4LCJpYXQiOjE2ODg3MDQ2MjgsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMTg2NTU1YWQtMzA2MC00ZTllLTlhMTItZjc4MGQwMWYwZmM4In19LCJuYmYiOjE2ODg3MDQ2MjgsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.lxPM1BG3ZdvJ9jtnlKqay--E_eGdNcnc6kY8SMJYq2cysVslvCqp9fQZHBRBtiQpjPPfjd7J8QDsIbEsmYdkPt5EmOw4KqBq4t-6m0dFpwy0HeVyChIJAXRdWVFYnOrN_0fwMAO-Pc2h9qfl4K9Oht5JMXe-Nb7-_06imtktpSWdmvAd59PGCF5aFCtYZFGAXVfoPrxai1GuIsKOoyWASfVnttiw2Xmt_TbEgNVua48NHuLbHEaWB-geyOGG0qdLqhzMcvMMis4t00kL_Ubc2TqRkvUvPczZf_eswnTZw8t4uj0U1ou9vlQvSXoS1yDqnyso4lVJvJEnClc3CXim_g

----


