I. Check the kube-proxy mode of the k8s cluster

1. Query the kube-proxy proxyMode endpoint

Compared with kube-proxy in iptables mode, kube-proxy in IPVS mode redirects traffic with lower latency and syncs proxy rules with better performance. IPVS mode also supports higher network throughput than the other proxy modes.

Normally this returns either iptables or ipvs:

curl localhost:10249/proxyMode
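
For example, on a cluster still running in the default mode the response is simply the mode name (illustrative output):

[root@kht110 ~]# curl localhost:10249/proxyMode
iptables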

2. Check the kube-proxy configuration

The configuration shows the proxy mode that kube-proxy is currently using.

kubectl get cm kube-proxy -n kube-system -o yaml | grep mode
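
On a default kubeadm cluster the output typically looks like this (illustrative); an empty value means the default mode, iptables:

    mode: ""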

II. Change the kube-proxy mode (Ubuntu)

Because kube-proxy is configured through a ConfigMap, we only need to edit the ConfigMap named kube-proxy and then delete all pods carrying the label k8s-app=kube-proxy (the change does not take effect immediately; the pods must be deleted so that they are automatically recreated). Each proxy mode has its own prerequisites; here we use ipvs as the example.

1. Install IPVS

# 1. Install ipset and ipvsadm
[root@kht110 ~]# sudo apt-get install ipvsadm ipset  -y
# 2. Write the modules that need to be loaded into a modules-load config file
[root@kht110 ~]# sudo cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs_dh
ip_vs_fo
ip_vs_ftp
ip_vs
ip_vs_lblc
ip_vs_lblcr
ip_vs_lc
ip_vs_mh
ip_vs_nq
ip_vs_ovf
ip_vs_pe_sip
ip_vs_rr
ip_vs_sed
ip_vs_sh
ip_vs_wlc
ip_vs_wrr
nf_conntrack
EOF
[root@kht110 ~]# systemctl enable --now systemd-modules-load.service
[root@kht110 ~]# reboot
[root@kht110 ~]# lsmod | grep -e ip_vs
ip_vs_wrr              16384  0
ip_vs_wlc              16384  0
ip_vs_sh               16384  0
ip_vs_sed              16384  0
ip_vs_rr               16384  0
ip_vs_pe_sip           16384  0
nf_conntrack_sip       36864  1 ip_vs_pe_sip
ip_vs_ovf              16384  0
ip_vs_nq               16384  0
ip_vs_mh               16384  0
ip_vs_lc               16384  0
ip_vs_ftp              16384  0
nf_nat                 45056  1 ip_vs_ftp
ip_vs_fo               16384  0
ip_vs_dh               16384  0
ip_vs                 155648  26 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_sh,ip_vs_ovf,ip_vs_fo,ip_vs_nq,ip_vs_pe_sip,ip_vs_wrr,ip_vs_lc,ip_vs_mh,ip_vs_sed,ip_vs_ftp
nf_conntrack          139264  3 nf_nat,nf_conntrack_sip,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              16384  5 nf_conntrack,nf_nat,btrfs,raid456,ip_vs

2. Update the ConfigMap

# Find the ConfigMap (cm) named kube-proxy in the kube-system namespace
[root@kht110 ~]# kubectl get cm -n kube-system
NAME                                 DATA   AGE
calico-config                        4      12d
coredns                              1      12d
extension-apiserver-authentication   6      12d
kube-proxy                           2      12d
kube-root-ca.crt                     1      12d
kubeadm-config                       1      12d
kubelet-config                       1      12d
# Edit the ConfigMap; around line 47, change mode to "ipvs" and save
[root@kht110 ~]# kubectl edit cm kube-proxy -n kube-system
configmap/kube-proxy edited
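
After the edit, the relevant part of the config.conf data in the ConfigMap looks like this (excerpt; surrounding fields omitted):

    ...
    mode: "ipvs"
    ...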


3. Delete the kube-proxy pods

Since the configuration change does not take effect immediately, delete all kube-proxy pods; Kubernetes will recreate them right away and the new pods will pick up the new configuration.

# List all pods in the kube-system namespace. There are 3 pods prefixed with "kube-proxy-" because this cluster has 3 nodes; all three pods carry the same label, k8s-app=kube-proxy, so we can delete them all at once by label.
[root@kht110 ~]# kubectl get pod -n kube-system --show-labels
NAME                                       READY   STATUS    RESTARTS   AGE    LABELS
calico-kube-controllers-59697b644f-bqhsg   1/1     Running   0          12d    k8s-app=calico-kube-controllers,pod-template-hash=59697b644f
calico-node-6x9rq                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
calico-node-9npwl                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
calico-node-s9g7k                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
coredns-c676cc86f-n4nj8                    1/1     Running   0          12d    k8s-app=kube-dns,pod-template-hash=c676cc86f
coredns-c676cc86f-rhvwg                    1/1     Running   0          12d    k8s-app=kube-dns,pod-template-hash=c676cc86f
etcd-k8s-master                            1/1     Running   0          12d    component=etcd,tier=control-plane
kube-apiserver-k8s-master                  1/1     Running   0          12d    component=kube-apiserver,tier=control-plane
kube-controller-manager-k8s-master         1/1     Running   0          12d    component=kube-controller-manager,tier=control-plane
kube-proxy-2jk2g                           1/1     Running   0          16m    controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-proxy-h5tgq                           1/1     Running   0          16m    controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-proxy-nbmv2                           1/1     Running   0          16m    controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-scheduler-k8s-master                  1/1     Running   0          12d    component=kube-scheduler,tier=control-plane
metrics-server-f68c598fc-vt4pz             1/1     Running   0          2d2h   k8s-app=metrics-server,pod-template-hash=f68c598fc
 
# Delete all kube-proxy pods by label
[root@kht110 ~]# kubectl delete pod -l k8s-app=kube-proxy -n kube-system
pod "kube-proxy-2jk2g" deleted
pod "kube-proxy-h5tgq" deleted
pod "kube-proxy-nbmv2" deleted
[root@kht110 ~]#
# Check again: the kube-proxy pods have been recreated
[root@kht110 ~]# kubectl get pod -n kube-system --show-labels
NAME                                       READY   STATUS    RESTARTS   AGE    LABELS
calico-kube-controllers-59697b644f-bqhsg   1/1     Running   0          12d    k8s-app=calico-kube-controllers,pod-template-hash=59697b644f
calico-node-6x9rq                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
calico-node-9npwl                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
calico-node-s9g7k                          1/1     Running   0          12d    controller-revision-hash=55f6f6844b,k8s-app=calico-node,pod-template-generation=1
coredns-c676cc86f-n4nj8                    1/1     Running   0          12d    k8s-app=kube-dns,pod-template-hash=c676cc86f
coredns-c676cc86f-rhvwg                    1/1     Running   0          12d    k8s-app=kube-dns,pod-template-hash=c676cc86f
etcd-k8s-master                            1/1     Running   0          12d    component=etcd,tier=control-plane
kube-apiserver-k8s-master                  1/1     Running   0          12d    component=kube-apiserver,tier=control-plane
kube-controller-manager-k8s-master         1/1     Running   0          12d    component=kube-controller-manager,tier=control-plane
kube-proxy-57d4m                           1/1     Running   0          3s     controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-proxy-5zxrg                           1/1     Running   0          3s     controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-proxy-z2fpg                           1/1     Running   0          3s     controller-revision-hash=dd4c999cf,k8s-app=kube-proxy,pod-template-generation=1
kube-scheduler-k8s-master                  1/1     Running   0          12d    component=kube-scheduler,tier=control-plane
metrics-server-f68c598fc-vt4pz             1/1     Running   0          2d2h   k8s-app=metrics-server,pod-template-hash=f68c598fc
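
As an alternative to deleting the pods by label, the kube-proxy DaemonSet can be restarted in one step (available with kubectl 1.15 and later):

kubectl rollout restart daemonset kube-proxy -n kube-system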

4. Inspect the traffic forwarding rules with ipvsadm

[root@kht110 ~]#  ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.17.0.1:30934 rr
  -> 192.168.2.110:443            Masq    1      0          0
TCP  172.17.0.1:31222 rr
  -> 192.168.2.110:80             Masq    1      0          0
TCP  192.168.2.110:30934 rr
  -> 192.168.2.110:443            Masq    1      0          0
TCP  192.168.2.110:31222 rr
  -> 192.168.2.110:80             Masq    1      0          0
TCP  10.96.0.1:443 rr
  -> 192.168.2.110:6443           Masq    1      1          0
TCP  10.96.0.10:53 rr
  -> 10.244.219.82:53             Masq    1      0          0
  -> 10.244.219.83:53             Masq    1      0          0
TCP  10.96.0.10:9153 rr
  -> 10.244.219.82:9153           Masq    1      0          0
  -> 10.244.219.83:9153           Masq    1      0          0
TCP  10.96.180.135:443 rr
  -> 192.168.2.110:8443           Masq    1      0          0
TCP  10.105.193.137:80 rr
  -> 192.168.2.110:80             Masq    1      0          0
TCP  10.105.193.137:443 rr
  -> 192.168.2.110:443            Masq    1      0          0
TCP  10.244.219.64:30934 rr
  -> 192.168.2.110:443            Masq    1      0          0
TCP  10.244.219.64:31222 rr
  -> 192.168.2.110:80             Masq    1      0          0
UDP  10.96.0.10:53 rr
  -> 10.244.219.82:53             Masq    1      0          0
  -> 10.244.219.83:53             Masq    1      0          0
[root@kht110 ~]# curl localhost:10249/proxyMode
ipvs

5. Additional notes

# IPVS mode depends on the following kernel modules:
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack

# Check whether these modules are loaded with:
lsmod | grep -e ip_vs -e nf_conntrack

# If they are not loaded, load them with:
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
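
The check and the modprobe calls can also be combined into a small loop (a minimal sketch, assuming root privileges):

for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do
  # load the module only if lsmod does not already list it
  lsmod | grep -q "^${mod} " || modprobe "${mod}"
done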

III. Change the kubelet cgroup driver

1. Modify the configuration file (method 1)

The configuration file is located at /var/lib/kubelet/config.yaml.
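
In this file the driver is set through the cgroupDriver field of the KubeletConfiguration; for example, to use systemd (excerpt):

# /var/lib/kubelet/config.yaml (excerpt)
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

Restart kubelet afterwards (systemctl daemon-reload && systemctl restart kubelet) for the change to take effect.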


2. Modify the configuration file (method 2)

vim /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
systemctl daemon-reload
systemctl restart kubelet

cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl restart kubelet

IV. Command auto-completion

# Enable command auto-completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
source <(helm completion bash)
echo "source <(helm completion bash)" >> ~/.bashrc
# On CentOS, if auto-completion fails, install bash-completion
yum -y install bash-completion

Troubleshooting

# Error when using auto-completion; the symptom looks like this
[root@k8smaster ~]# kubectl get pod -n -bash: _get_comp_words_by_ref: command not found
# Fix
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
bash

V. Change the maximum number of pods per node

# Check the maximum number of pods supported by node kht116 (default 110)
[root@kht115 ~]# kubectl describe nodes kht116


# Edit /etc/sysconfig/kubelet and raise the limit
vim /etc/sysconfig/kubelet
KUBE_PROXY_MODE="ipvs"
# Note: keep KUBELET_EXTRA_ARGS as a single assignment; a later assignment would override an earlier one
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --fail-swap-on=false --max-pods=300"


cd /etc/systemd/system/multi-user.target.wants
vim kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
Wants=network-online.target
After=network-online.target

[Service]
# Append $KUBELET_EXTRA_ARGS to ExecStart (systemd does not allow trailing comments on the same line)
ExecStart=/usr/bin/kubelet $KUBELET_EXTRA_ARGS
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target


# Restart kubelet
systemctl daemon-reload
systemctl restart kubelet
# Check and confirm
kubectl describe nodes kht116

VI. Remove the taint from the master node

In a cluster initialized with kubeadm, for safety reasons pods are not scheduled onto the master node; in other words, the master node does not take on workloads.

# Check the master's taints
root@master:~# kubectl describe nodes master |grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
# Remove the master taint
root@master:~# kubectl taint node --all node-role.kubernetes.io/master-
node/master untainted
taint "node-role.kubernetes.io/master" not found
taint "node-role.kubernetes.io/master" not found
# Confirm the taint has been removed
root@master:~# kubectl describe nodes master |grep Taints
Taints:             <none>
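
On newer Kubernetes releases (roughly 1.24 and later) the control-plane taint is used instead of, or alongside, the master taint, so the equivalent command is:

kubectl taint node --all node-role.kubernetes.io/control-plane-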


VII. Allow pods to be scheduled on the master node

Remove the master node taint.
Verified working on k8s 1.18.

root@k8smaster:~# kubectl taint node --all node-role.kubernetes.io/master-
node/k8smaster untainted
taint "node-role.kubernetes.io/master" not found
taint "node-role.kubernetes.io/master" not found
# Re-taint the master so it does not take on workloads
root@k8smaster:~# kubectl taint nodes k8smaster node-role.kubernetes.io/master=:NoSchedule
# Remove the taint from all nodes again
kubectl taint nodes --all node-role.kubernetes.io/master-
# Output:
node "k8s" untainted
# An "error: taint "node-role.kubernetes.io/master:" not found" message can be ignored.

# Forbid pods from being scheduled on the master
kubectl taint nodes k8smaster node-role.kubernetes.io/master=true:NoSchedule
# When a node needs a reboot or reconfiguration without impacting services, drain it first
kubectl drain <node-name>      # all pods will be migrated to other nodes
kubectl uncordon <node-name>   # bring the node back into normal service

# After a project is deployed, set the default namespace to the deployment's namespace for easier operations
kubectl config set-context $(kubectl config current-context) --namespace=[namespace]

# How to retrieve the secrets (passwords) of databases such as mysql, redis, mongo
kubectl get secret *** -o yaml   # show the secret

echo 'YVNkZ2U3OGhkLg==' | base64 --decode   # decode the secret value
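
# A single key can also be decoded directly with jsonpath (mysql-secret and the password key are illustrative names)
kubectl get secret mysql-secret -o jsonpath='{.data.password}' | base64 --decode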

# Bulk-delete Evicted pods to keep the pod list readable
kubectl get pod -n [namespace]  |grep Evicted |awk '{print $1}' |xargs kubectl delete pod -n [namespace]

# Copy a file from a container to the local machine
kubectl cp <namespace>/<pod>:<root_dir>/<parent_dir>/<file_name> ./<file_name>

# Force-delete a pod that is stuck in the Terminating state
kubectl delete pod [pod name] --force --grace-period=0 -n [namespace]

# With only a few servers, let the master node take on workloads
kubectl taint nodes k8smaster node-role.kubernetes.io/master-

kubectl taint nodes k8smaster node-role.kubernetes.io/master=:NoSchedule  # make the master not take on workloads

# When a node needs a reboot or reconfiguration without impacting services, drain it first
kubectl drain <node-name>      # all pods will be migrated to other nodes

kubectl uncordon <node-name>   # bring the node back into normal service

VIII. Deploy an HTTPS (TLS) certificate in k8s

kubectl create secret tls ops-secret --key secret/8791756_znyw.hangzhoupuyu.work.key --cert secret/8791756_znyw.hangzhoupuyu.work.pem -n ops
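
For reference, a minimal sketch of how such a TLS secret is typically consumed from an Ingress (the Ingress name ops-ingress and the backend service ops-web are hypothetical; the host comes from the certificate file name):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ops-ingress          # hypothetical name
  namespace: ops
spec:
  tls:
  - hosts:
    - znyw.hangzhoupuyu.work
    secretName: ops-secret   # the TLS secret created above
  rules:
  - host: znyw.hangzhoupuyu.work
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: ops-web    # hypothetical backend service
            port:
              number: 80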

IX. Join worker nodes to the cluster

Regenerate the join command for worker nodes by creating a new token.

# Print the cluster join command on the master node
root@master:/kht# kubeadm token create --print-join-command
kubeadm join 192.168.2.110:6443 --token udje01.hr3l69gsfz77ivsl --discovery-token-ca-cert-hash sha256:e3c48d86584778061466bea3291e74ab95e857cbee365f21b64baf4ab7ed2f23
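
Existing tokens and their expiry can be listed on the master with:

kubeadm token list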

X. Enable kubectl on worker nodes

#1. Copy the master's admin.conf to the worker node
scp /etc/kubernetes/admin.conf root@192.168.2.110:/etc/kubernetes/
#2. Configure the environment variable
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
#3. Test
[root@node1 ~]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
k8s-m    Ready    master   10d   v1.22.16
node01   Ready    <none>   10d   v1.22.16
node02   Ready    <none>   10d   v1.22.16