1. System configuration

1.1 Basic system setup


1. Configure a static IP address (adjust the address per node)
[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# cp ifcfg-ens33 ifcfg-ens33.bak

[root@localhost network-scripts]# vim ifcfg-ens33
BOOTPROTO="static"         # change the value to static (lowercase)
ONBOOT="yes"               # make sure the value is yes (lowercase)

IPADDR=192.168.0.110        # add these lines (the keys must be uppercase); use 111 and 112 on the other two nodes
NETMASK=255.255.255.0
GATEWAY=192.168.0.254

[root@localhost ~]# systemctl restart network
[root@localhost ~]# ip add | grep 192
inet 192.168.0.110/24 brd 192.168.0.255 scope global noprefixroute ens33


2. Configure DNS resolution
[root@localhost ~]# echo -e "nameserver  114.114.114.114  \nnameserver 202.96.209.5"  >> /etc/resolv.conf
[root@localhost ~]# ping www.qq.com
PING ins-r23tsuuf.ias.tencent-cloud.net (101.91.42.232) 56(84) bytes of data.
64 bytes from 101.91.42.232 (101.91.42.232): icmp_seq=1 ttl=128 time=6.01 ms
64 bytes from 101.91.42.232 (101.91.42.232): icmp_seq=2 ttl=128 time=8.69 ms
64 bytes from 101.91.42.232 (101.91.42.232): icmp_seq=3 ttl=128 time=7.12 ms
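
### Note: the network service (or NetworkManager) may regenerate /etc/resolv.conf and drop these entries.
### To make the DNS servers persistent, they can also be set in the interface config (a sketch, same servers as above):
[root@localhost ~]# cat >> /etc/sysconfig/network-scripts/ifcfg-ens33 << EOF
DNS1=114.114.114.114
DNS2=202.96.209.5
EOF
[root@localhost ~]# systemctl restart network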


3. Disable SELinux
[root@localhost ~]# setenforce 0    # takes effect immediately, but does not survive a reboot
[root@localhost ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config    # persistent; anchored so the comment lines in the file are untouched


4. Disable the firewalld firewall
[root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
[root@localhost ~]# systemctl status firewalld


5. Set the hostname
[root@localhost ~]# hostnamectl set-hostname master   #  use node01 and node02 on the other two nodes
[root@localhost ~]# bash      # start a new shell so the prompt reflects the new hostname


6. Add local hostname resolution entries
[root@master ~]#  cat >> /etc/hosts << EOF
192.168.0.110  master
192.168.0.111  node01
192.168.0.112  node02
EOF
[root@master ~]#  scp -r /etc/hosts node01:/etc
[root@master ~]#  scp -r /etc/hosts node02:/etc


7. Configure passwordless SSH from the master to the nodes (optional)
[root@master ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[root@master ~]# ssh-copy-id node01
[root@master ~]# ssh-copy-id node02
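
# With more nodes, the key distribution can be looped (a sketch, assuming the same node names as above):
[root@master ~]# for node in node01 node02; do ssh-copy-id -i ~/.ssh/id_rsa.pub $node; done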

1.2 Basic system tuning


### Note: apart from items 1 and 2, everything below is optional

1. Update the YUM repository configuration
[root@localhost ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@localhost ~]# yum clean all
[root@localhost ~]# yum makecache


2. Install common utility packages and dependencies
[root@localhost ~]# yum -y install epel-release net-tools sysstat vim gcc gcc-c++ curl wget git telnet ntp zip unzip


3. Configure time synchronization
[root@localhost ~]# timedatectl set-timezone "Asia/Shanghai"
[root@localhost ~]# ntpdate time1.aliyun.com      # one-shot sync; run this before starting ntpd, or it fails with "the NTP socket is in use"
[root@localhost ~]# systemctl start ntpd && systemctl enable ntpd
[root@localhost ~]# date


4. Enable rc.local at boot
[root@localhost ~]# chmod +x /etc/rc.d/rc.local
[root@localhost ~]# systemctl enable rc-local.service
[root@localhost ~]# systemctl start rc-local.service


5. SSH service tuning
[root@localhost ~]# sed -i 's/#UseDNS yes/UseDNS no/g' /etc/ssh/sshd_config
[root@localhost ~]# sed -i 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' /etc/ssh/sshd_config
[root@localhost ~]# systemctl restart sshd.service


6. Disable IPv6
[root@localhost ~]# sed -i 's/^IPV6INIT=.*/IPV6INIT=no/' /etc/sysconfig/network-scripts/ifcfg-ens33
[root@localhost ~]# echo "NETWORKING_IPV6=no" >> /etc/sysconfig/network
[root@localhost ~]# echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf

[root@localhost ~]# sysctl -p  


7. Raise the open-file and process limits
[root@localhost ~]# ulimit -SHn 655360
[root@localhost ~]# cat >> /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655360
* hard nproc 655360
EOF
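
# The limits.conf entries apply to new login sessions only; after logging in again, a quick check (not in the original steps):
[root@localhost ~]# ulimit -n      # should print 655360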


8. systemd and per-user limit tuning (example for a 4C8G host)
[root@localhost ~]# sed -i "s/4096/100000/g" /etc/security/limits.d/20-nproc.conf
[root@localhost ~]# sed -i "s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=100000/g" /etc/systemd/system.conf
[root@localhost ~]# sed -i "s/#DefaultLimitNPROC=/DefaultLimitNPROC=100000/g" /etc/systemd/system.conf
[root@localhost ~]# sed -i "s/#DefaultLimitCORE=/DefaultLimitCORE=infinity/g" /etc/systemd/user.conf
[root@localhost ~]# sed -i "s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=100000/g" /etc/systemd/user.conf
[root@localhost ~]# sed -i "s/#DefaultLimitNPROC=/DefaultLimitNPROC=100000/g" /etc/systemd/user.conf
[root@localhost ~]# systemctl daemon-reload


9. Kernel (sysctl) parameter tuning (example for an 8C16G host)
[root@localhost ~]# cat >> /etc/sysctl.conf << EOF
net.core.wmem_default = 1746400
net.core.wmem_max = 3492800
net.core.rmem_default = 1746400
net.core.rmem_max = 3492800
net.core.netdev_max_backlog = 16384
net.core.somaxconn = 16384
net.ipv4.ip_forward = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_max_orphans = 16384
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 30     
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.route.gc_timeout = 100
fs.file-max = 6815744
kernel.pid_max=1000000
fs.inotify.max_user_watches = 524288
fs.inotify.max_user_instances = 256
fs.inotify.max_queued_events = 32768
EOF

[root@localhost ~]# sysctl -p 
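
# A couple of spot checks that the values took effect (not in the original steps):
[root@localhost ~]# sysctl net.ipv4.ip_forward net.core.somaxconn      # should echo back 1 and 16384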

2. Container prerequisites

2.1 Runtime dependencies



1. Disable the swap partition
[root@localhost ~]# swapoff -a
[root@localhost ~]# cp /etc/fstab /etc/fstab.bak
[root@localhost ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@localhost ~]# cat /etc/fstab


2. Load the br_netfilter module
[root@localhost ~]# modprobe br_netfilter          #  loads the module for the current boot only
[root@localhost ~]# lsmod | grep br_netfilter
br_netfilter           28672  0


3. Load the IPVS modules
[root@localhost ~]# yum -y install ipset ipvsadm
[root@localhost ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack  
modprobe -- br_netfilter      #  also load br_netfilter automatically at boot
EOF

[root@localhost ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules 
[root@localhost ~]# source /etc/sysconfig/modules/ipvs.modules 
[root@localhost ~]# lsmod | grep -e ip_vs -e nf_conntrack
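
# A quick loop to confirm that every module actually loaded (a sketch, not in the original steps):
[root@localhost ~]# for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack br_netfilter; do lsmod | grep -q "^$m " && echo "$m loaded" || echo "$m MISSING"; done

# Note: if the /etc/sysconfig/modules script does not run at boot on your system, the systemd-native
# mechanism is a file under /etc/modules-load.d, read by systemd-modules-load (a sketch):
[root@localhost ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
EOF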


4. Pass bridged IPv4 traffic to the iptables chains
[root@localhost ~]# cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF   
[root@localhost ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

[root@localhost ~]#  reboot

2.2 Upgrading the OS kernel


[root@localhost ~]# uname -r
3.10.0-1160.el7.x86_64

[root@localhost ~]# wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.278-1.el7.elrepo.x86_64.rpm
[root@localhost ~]# wget http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/kernel-lt-5.4.278-1.el7.elrepo.x86_64.rpm
[root@localhost ~]# rpm -ivh kernel*


[root@localhost ~]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
CentOS Linux (5.4.278-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux (3.10.0-1160.el7.x86_64) 7 (Core)
CentOS Linux (0-rescue-482a03c6354c417b8c60289c82a50fed) 7 (Core)

[root@localhost ~]#  grub2-set-default 0
[root@localhost ~]#  grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-5.4.278-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-5.4.278-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-482a03c6354c417b8c60289c82a50fed
Found initrd image: /boot/initramfs-0-rescue-482a03c6354c417b8c60289c82a50fed.img
done

[root@localhost ~]# reboot
 
[root@localhost ~]# uname -r
5.4.278-1.el7.elrepo.x86_64
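
# Optional cleanup, not in the original steps: once the 5.4 kernel has booted cleanly, the old 3.10
# packages can be removed (the kernel-lt packages have a different name and are unaffected):
[root@localhost ~]# yum -y remove kernel      # removes the old kernel-3.10.0-1160 package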

3. Installing and configuring the container runtime

3.1 Installing Docker


1. Install Docker dependencies and configure the YUM repo

[root@localhost ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@localhost ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@localhost ~]# yum makecache fast

2. Install a specific Docker version

[root@localhost ~]# yum list docker-ce --showduplicates | sort -r       #  list the available versions
[root@localhost ~]# yum install -y docker-ce-19.03.9 docker-ce-cli-19.03.9 containerd.io
[root@localhost ~]# systemctl start docker && systemctl enable docker

3.2 Adjusting the Docker configuration



# Configure registry mirrors, private registries, etc. (adjust to your environment)

[root@localhost ~]# sed -i '/ExecStart/s/.*/& --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service

[root@localhost ~]# mkdir /etc/docker
[root@localhost ~]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://docker.ckyl.me", "https://docker.1panel.live", "https://do.nark.eu.org"]
}

### Brief description of the fields (adjust to your environment)
1. registry-mirrors: Docker registry mirror (accelerator) addresses
2. insecure-registries: private Docker registry addresses, if any


[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart docker
[root@localhost ~]# docker info | grep "Cgroup Driver"
Cgroup Driver: systemd
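
# Alternative (a sketch, assuming the same mirrors as above): instead of patching the unit file with sed,
# the cgroup driver can be set in daemon.json via "exec-opts" -- pick one approach, not both:
{
  "registry-mirrors": ["https://docker.ckyl.me", "https://docker.1panel.live", "https://do.nark.eu.org"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}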


4. Deploying the Kubernetes cluster

4.1 Installing the Kubernetes packages


1. Configure the Kubernetes YUM repo
[root@localhost ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

[root@localhost ~]# yum makecache fast


2. Install a specific Kubernetes version
[root@localhost ~]# yum list kubelet kubeadm kubectl --showduplicates | sort -r       # list the installable versions
[root@localhost ~]# yum install -y  kubeadm-1.20.1 kubelet-1.20.1 kubectl-1.20.1

[root@localhost ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.1", GitCommit:"af46c47ce925f4c4ad5cc8d1fca46c7b77d13b38", GitTreeState:"clean", BuildDate:"2020-12-08T17:57:36Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}

[root@localhost ~]# vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl enable kubelet 
[root@localhost ~]# systemctl restart kubelet


4.2 Extending the cluster certificate validity (10 years)



1. Install the build environment (Go toolchain)
[root@master ~]# wget https://dl.google.com/go/go1.16.4.linux-amd64.tar.gz
[root@master ~]# tar -zxvf go1.16.4.linux-amd64.tar.gz -C /usr/local/
[root@master ~]# echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile      # single quotes, so $PATH expands at login rather than now
[root@master ~]# source /etc/profile
[root@master ~]# go version
go version go1.16.4 linux/amd64


2. Download the source matching the installed version (v1.20.1, as shown by kubeadm version in 4.1)
[root@master ~]# wget https://github.com/kubernetes/kubernetes/archive/v1.20.1.tar.gz --no-check-certificate
[root@master ~]# tar -zxvf v1.20.1.tar.gz -C /mnt


3. Modify the relevant source files
[root@master ~]# mv /mnt/kubernetes-1.20.1/ /mnt/kubernetes
[root@master ~]# cd /mnt/kubernetes/cmd/kubeadm/app/constants

# To set the certificate validity to 10 years, change the following file
[root@master constants]# cat constants.go | grep 365
CertificateValidity = time.Hour * 24 * 365
[root@master constants]# sed -i 's/365/365 * 10/' constants.go
[root@master constants]# cat constants.go | grep 365
CertificateValidity = time.Hour * 24 * 365 * 10
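
# Optional, slightly safer variant (a sketch): restrict the substitution to the CertificateValidity line,
# so no other occurrence of "365" in the file is touched:
[root@master constants]# sed -i '/CertificateValidity/s/365/365 * 10/' constants.go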


# To set the validity to 100 years instead, change the following two files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[root@master constants]# sed -i 's/365/365 * 100/' constants.go
[root@master constants]# cat constants.go | grep 365
CertificateValidity = time.Hour * 24 * 365 * 100

[root@master ~]# cd /mnt/kubernetes/staging/src/k8s.io/client-go/util/cert
[root@master cert]# cp cert.go cert.go.bak
[root@master cert]# cat cert.go | grep -E 'NotAfter.*365d'
NotAfter:              now.Add(duration365d * 10).UTC(),
[root@master cert]# sed -i 's/duration365d \* 10/duration365d * 100/' cert.go
[root@master cert]# cat cert.go | grep -E 'NotAfter.*365d'
NotAfter:              now.Add(duration365d * 100).UTC(),
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 
4. Build the new kubeadm binary
[root@master ~]# yum -y install gcc make
[root@master ~]# cd /mnt/kubernetes/
[root@master kubernetes]# make all WHAT=cmd/kubeadm GOFLAGS=-v      # build kubeadm (not kubectl)
[root@master kubernetes]# cd _output/bin/
[root@master bin]# ls -l | grep kubeadm
-rwxr--r-- 1 root root  39325696 Dec 10 02:13 kubeadm


5. Replace the old kubeadm binary and prepare for cluster initialization
[root@master ~]# mv /usr/bin/kubeadm /usr/bin/kubeadm.old
[root@master ~]# cp /mnt/kubernetes/_output/bin/kubeadm /usr/bin/kubeadm
[root@master ~]# chmod +x /usr/bin/kubeadm

[root@master ~]# kubeadm version      # GitTreeState "archive" confirms this is the locally built binary
kubeadm version: &version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.1", GitCommit:"c4d752765b3bbac2237bf87cf0b1c2e307844666", GitTreeState:"archive", BuildDate:"2022-12-09T18:12:16Z", GoVersion:"go1.16.4", Compiler:"gc", Platform:"linux/amd64"}

4.3 Initializing the cluster

4.3.1 Preparing the images

### This step is optional

1. List the images (and exact versions) required by the target cluster version
[root@master ~]# kubeadm config images list --kubernetes-version=v1.20.1
k8s.gcr.io/kube-apiserver:v1.20.1
k8s.gcr.io/kube-controller-manager:v1.20.1
k8s.gcr.io/kube-scheduler:v1.20.1
k8s.gcr.io/kube-proxy:v1.20.1
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0


4.3.2 Running the initialization

1. Initialize the cluster (how long it takes depends mostly on your network)
### Save the token printed during initialization; the worker nodes need it to join the cluster later

[root@master ~]# kubeadm init --kubernetes-version v1.20.1 --apiserver-advertise-address=192.168.0.110 \
                 --image-repository registry.aliyuncs.com/google_containers --service-cidr=10.96.0.0/16 \
                 --pod-network-cidr=10.244.0.0/16 --apiserver-cert-extra-sans XXX.XX.XX.XXX


### Note for clusters on public cloud
#   In most such clusters only the master has a public address, so pass --apiserver-cert-extra-sans with the public IP when initializing
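
### Equivalently, the same options can be written to a config file and passed with --config
### (a sketch using kubeadm 1.20's v1beta2 API; the cert SAN placeholder is kept as above):

[root@master ~]# cat > kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.110
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.1
imageRepository: registry.aliyuncs.com/google_containers
networking:
  serviceSubnet: 10.96.0.0/16
  podSubnet: 10.244.0.0/16
apiServer:
  certSANs:
  - XXX.XX.XX.XXX
EOF
[root@master ~]# kubeadm init --config kubeadm-config.yaml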


### Partial output of the initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
   
kubeadm join 192.168.0.110:6443 --token 1reov4.btph7px90d582nu2 \
    --discovery-token-ca-cert-hash sha256:04a72f1c3f4e4ff89bccc7cdf1d2850fe8a266fc8c6a40880d979c411ac50821 
   
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    
       
2. Configure the kubectl client tool
[root@master ~]# mkdir /root/.kube/
[root@master ~]# cp -i /etc/kubernetes/admin.conf /root/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config        # optional when running as root


3. Enable IPVS support in kube-proxy
[root@master ~]# kubectl edit cm kube-proxy -n kube-system
Change mode: "" to mode: "ipvs"
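
# The same edit can be scripted instead of done interactively (a sketch; the change only takes effect
# once the kube-proxy pods are recreated, which section 4.7 walks through):
[root@master ~]# kubectl -n kube-system get cm kube-proxy -o yaml | sed 's/mode: ""/mode: "ipvs"/' | kubectl apply -f -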


4. Deploy the cluster network (Calico)
[root@master ~]# wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml --no-check-certificate
[root@master ~]# sed -i 's/192.168/10.244/' calico.yaml      # align CALICO_IPV4POOL_CIDR with the pod CIDR (10.244.0.0/16)
[root@master ~]# kubectl apply -f calico.yaml

[root@master ~]# kubectl get nodes      # give it ten seconds or so
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   3m10s   v1.20.1

4.4 Verifying the certificate validity



[root@master ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep ' Not '
            Not Before: Apr 19 07:54:19 2023 GMT
            Not After : Apr 16 06:33:32 2033 GMT

[root@master ~]# kubeadm alpha certs check-expiration
Command "check-expiration" is deprecated, please use the same command under "kubeadm certs"
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Dec 19, 2032 06:33 UTC   9y                                      no      
apiserver                  Dec 19, 2032 06:33 UTC   9y              ca                      no      
apiserver-etcd-client      Dec 19, 2032 06:33 UTC   9y              etcd-ca                 no      
apiserver-kubelet-client   Dec 19, 2032 06:33 UTC   9y              ca                      no      
controller-manager.conf    Dec 19, 2032 06:33 UTC   9y                                      no      
etcd-healthcheck-client    Dec 19, 2032 06:33 UTC   9y              etcd-ca                 no      
etcd-peer                  Dec 19, 2032 06:33 UTC   9y              etcd-ca                 no      
etcd-server                Dec 19, 2032 06:33 UTC   9y              etcd-ca                 no      
front-proxy-client         Dec 19, 2032 06:33 UTC   9y              front-proxy-ca          no      
scheduler.conf             Dec 19, 2032 06:33 UTC   9y                                      no      

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Dec 06, 2032 07:54 UTC   9y              no      
etcd-ca                 Dec 06, 2032 07:54 UTC   9y              no      
front-proxy-ca          Dec 06, 2032 07:54 UTC   9y              no


4.5 Joining the other nodes to the cluster


1. Join the worker nodes (run on each worker node)
[root@node01 ~]# kubeadm join 192.168.0.110:6443 --token 1reov4.btph7px90d582nu2 \
--discovery-token-ca-cert-hash sha256:04a72f1c3f4e4ff89bccc7cdf1d2850fe8a266fc8c6a40880d979c411ac50821

[root@node02 ~]# kubeadm join 192.168.0.110:6443 --token 1reov4.btph7px90d582nu2 \
--discovery-token-ca-cert-hash sha256:04a72f1c3f4e4ff89bccc7cdf1d2850fe8a266fc8c6a40880d979c411ac50821


2. Check the cluster status on the master
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   2m10s   v1.20.1
node01   Ready    <none>                 1m14s   v1.20.1
node02   Ready    <none>                 1m25s   v1.20.1


3. Adding nodes later requires a new Token
#  The token generated at initialization is valid for only 24 hours, so by the time another node needs to join it has expired; create a new one
#  Re-initializing the cluster would wipe all of its data; creating a token does not, and it has no effect on the nodes that already joined

[root@master ~]# kubeadm token create --print-join-command
kubeadm join 192.168.0.110:6443 --token t6w66b.7kd3ke5lvthl6ejt     --discovery-token-ca-cert-hash sha256:8103040effb16b4b080cc66096dbfc3194de9649ad7db351fba833d084356a94

# A new node can join the cluster with the token generated above, provided the preparation in sections 1 through 4 of this document has been completed on it
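
# To list the tokens currently valid on the cluster (a quick check, not in the original steps):
[root@master ~]# kubeadm token list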


4.6 Checking cluster health



[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}


[root@master ~]# vim /etc/kubernetes/manifests/kube-scheduler.yaml
- --port=0       #  comment out or delete this line

[root@master ~]# vim /etc/kubernetes/manifests/kube-controller-manager.yaml
- --port=0       #  comment out or delete this line

[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart kubelet

[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}


4.7 Verifying that IPVS mode is in use


[root@master ~]# kubectl get pod -n kube-system | grep kube-proxy
NAME                                      READY   STATUS    RESTARTS   AGE
kube-proxy-7vtsf                          1/1     Running   1          14m
kube-proxy-8k6j6                          1/1     Running   1          15m
kube-proxy-k4kgz                          1/1     Running   1          14m

[root@master ~]# kubectl logs kube-proxy-7vtsf -n kube-system | grep ipvs     ### check all three pods
[root@master ~]# kubectl logs kube-proxy-8k6j6 -n kube-system | grep ipvs
[root@master ~]# kubectl logs kube-proxy-k4kgz -n kube-system | grep ipvs

# If all three print "Using ipvs Proxier", they are all running in IPVS mode.
# If one or more of them print nothing, delete those pods and check the newly created ones, as shown below:

# Example with all three deleted
[root@master ~]# kubectl delete pod kube-proxy-7vtsf kube-proxy-8k6j6 kube-proxy-k4kgz -n kube-system
pod "kube-proxy-7vtsf" deleted
pod "kube-proxy-8k6j6" deleted
pod "kube-proxy-k4kgz" deleted

[root@master ~]# kubectl get pod -n kube-system | grep kube-proxy   # wait a few seconds
kube-proxy-2nqcg                          1/1     Running   0          19s
kube-proxy-9v99s                          1/1     Running   0          16s
kube-proxy-smz8k                          1/1     Running   0          11s

[root@master ~]# kubectl logs kube-proxy-2nqcg -n kube-system | grep ipvs
I0907 02:28:24.357562       1 server_others.go:258] Using ipvs Proxier.

[root@master ~]# kubectl logs kube-proxy-9v99s -n kube-system | grep ipvs
I0907 02:28:24.357562       1 server_others.go:258] Using ipvs Proxier.

[root@master ~]# kubectl logs kube-proxy-smz8k -n kube-system | grep ipvs
I0907 02:28:24.357562       1 server_others.go:258] Using ipvs Proxier.
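
# The IPVS rule table itself can also be inspected directly, using the ipvsadm package installed in
# section 2.1 (a quick check, not in the original steps):
[root@master ~]# ipvsadm -Ln      # lists the virtual servers and backends kube-proxy has programmed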


5. Additional cluster operations

5.1 Changing the cluster NodePort range


[root@master ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
 - --service-node-port-range=1024-65535       # add this line

[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart kubelet
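
# The kubelet restarts the static apiserver pod automatically when its manifest changes; to confirm
# the flag is in place (a quick check, not in the original steps):
[root@master ~]# grep service-node-port-range /etc/kubernetes/manifests/kube-apiserver.yaml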

5.2 Tab completion for kubectl


1. Install the bash-completion package
[root@master ~]# yum install -y bash-completion
[root@master ~]# source /usr/share/bash-completion/bash_completion
[root@master ~]# source <(kubectl completion bash)
[root@master ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc      # plain double quotes, not curly ones


# The two source commands above apply to the current session only; for completion to persist, make sure
# these two lines are in /root/.bashrc (the echo above adds the second one):
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
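
# Optionally, completion can also be attached to a short alias (a sketch, per the upstream kubectl completion docs):
[root@master ~]# echo 'alias k=kubectl' >> ~/.bashrc
[root@master ~]# echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc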


2. Test that completion works

[root@master ~]# kubectl get d
daemonsets.apps   deployments.apps

[root@master ~]# kubectl get p
persistentvolumeclaims             poddisruptionbudgets.policy        podsecuritypolicies.policy         priorityclasses.scheduling.k8s.io  
persistentvolumes                  pods                 podtemplates   

5.3 Using kubectl on worker nodes

5.3.1 Symptom

### By default, nodes, services, pods, and so on can only be queried from the MASTER node; running kubectl on a worker NODE fails as shown below

[root@node01 ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?

[root@node02 ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?

5.3.2 Fix

[root@node01 ~]# mkdir /root/.kube
[root@node02 ~]# mkdir /root/.kube      # the target directory must also exist on node02 before the scp below

[root@master ~]# scp -r /root/.kube/config node01:/root/.kube/
[root@master ~]# scp -r /root/.kube/config node02:/root/.kube/

[root@node01 ~]# kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   4d23h   v1.20.1
node01   Ready    <none>   4d23h   v1.20.1
node02   Ready    <none>   4d23h   v1.20.1