Versions
CentOS 7.9, kernel 5.4.101 (for the kernel upgrade procedure, see this link)
Kubernetes v1.20.4
Node role | Hostname | IP address |
---|---|---|
master | node-1 | 192.168.202.71 |
node | node-2 | 192.168.202.72 |
node | node-3 | 192.168.202.73 |
node | node-4 | 192.168.202.74 |
node | node-5 | 192.168.202.75 |
1. Preparation to run on every node
# Upgrade the kernel to the latest LTS kernel available for CentOS 7
# Disable swap:
# temporarily
swapoff -a
# permanently (delete or comment out the swap line, then reboot)
vim /etc/fstab
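For a non-interactive alternative to editing /etc/fstab by hand, a minimal sketch that comments out the swap entry (it assumes the entry contains the word "swap" surrounded by whitespace, and it keeps a .bak backup):
# Comment out every active fstab line that mounts swap
sed -ri.bak 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab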
# Load the IPVS kernel modules on every node
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Note: on kernels 4.19 and later, nf_conntrack_ipv4 is merged into nf_conntrack, so grep for the latter
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
yum install ipvsadm ipset -y
# ipvsadm -ln
# once kube-proxy runs in IPVS mode, the Service rules show up here
# Set the hostname on each node
[root@node-1 ~]# hostnamectl set-hostname node-1
...
# Name resolution on every node
[root@node-1 ~]# cat /etc/hosts
192.168.202.71 node-1
192.168.202.72 node-2
192.168.202.73 node-3
192.168.202.74 node-4
192.168.202.75 node-5
# node-1 is the master and the remaining nodes are workers; set up passwordless SSH from the master to every node
[root@node-1 ~]# ssh-keygen
[root@node-1 ~]# ssh-copy-id node-1
[root@node-1 ~]# ssh-copy-id node-2
[root@node-1 ~]# ssh-copy-id node-3
[root@node-1 ~]# ssh-copy-id node-4
[root@node-1 ~]# ssh-copy-id node-5
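The five ssh-copy-id calls can equally be written as a loop:
[root@node-1 ~]# for i in {1..5}; do ssh-copy-id node-$i; done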
# Kernel parameter tuning on every node
[root@node-1 ~]# tee /etc/sysctl.conf <<-"EOF"
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
# net.ipv4.tcp_tw_recycle was removed in kernel 4.12, so it must not be set on the 5.4 kernel used here
net.ipv4.tcp_fin_timeout = 30
net.core.somaxconn = 20480
net.core.netdev_max_backlog = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.ipv4.tcp_max_tw_buckets = 800000
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
EOF
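The two bridge-nf-call parameters only take effect once the br_netfilter module is loaded, and the new file still has to be applied; a follow-up sketch for every node (assuming the module is not loaded yet):
# Load br_netfilter now and at every boot, then apply the sysctl file
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p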
# Install Docker
[root@node-1 ~]# wget -P /etc/yum.repos.d/ https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node-1 ~]# yum install docker-ce -y
# Change the group that owns the Docker socket
[root@node-1 ~]# sed -i s/SocketGroup=docker/SocketGroup=root/g /usr/lib/systemd/system/docker.socket
# Set Docker's cgroup driver to systemd (it must match the kubelet's driver)
[root@node-1 ~]# cat /etc/docker/daemon.json
{
    "registry-mirrors": [
        "https://by0gaj4f.mirror.aliyuncs.com"
    ],
    "exec-opts": [
        "native.cgroupdriver=systemd"
    ],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    },
    "storage-driver": "overlay2"
}
[root@node-1 ~]# systemctl daemon-reload
[root@node-1 ~]# systemctl enable docker
[root@node-1 ~]# systemctl restart docker
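To confirm the driver change took effect, docker info should now report "Cgroup Driver: systemd":
[root@node-1 ~]# docker info | grep -i 'cgroup driver'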
[root@node-1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@node-1 ~]# for i in {2..5}; do scp /etc/yum.repos.d/kubernetes.repo node-$i:/etc/yum.repos.d/ ; done
2. Install the Kubernetes packages
[root@node-1 ~]# yum help | grep show
--showduplicates      show duplicates, in repos, in list/search commands
[root@node-1 ~]# yum list kubeadm --showduplicates
# Pin the version
[root@node-1 ~]# yum install kubeadm-1.20.4 kubectl-1.20.4 kubelet-1.20.4 -y
# The install and enable steps must be run on every node
[root@node-1 ~]# systemctl enable kubelet
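Since the master already has passwordless SSH to the workers, the install can be pushed out in one loop; a sketch, assuming the repo file was distributed above:
[root@node-1 ~]# for i in {2..5}; do ssh node-$i "yum install -y kubeadm-1.20.4 kubectl-1.20.4 kubelet-1.20.4 && systemctl enable kubelet"; done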
3. Initialize the cluster
# Run on the master node
[root@node-1 ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.20.4
k8s.gcr.io/kube-controller-manager:v1.20.4
k8s.gcr.io/kube-scheduler:v1.20.4
k8s.gcr.io/kube-proxy:v1.20.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
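k8s.gcr.io is usually unreachable from mainland China, so the images can be pre-pulled from the same Aliyun mirror that the init command below points at; an optional sketch:
[root@node-1 ~]# kubeadm config images pull \
--image-repository="registry.aliyuncs.com/google_containers" \
--kubernetes-version="v1.20.4"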
[root@node-1 ~]# kubeadm init \
--apiserver-advertise-address=192.168.202.71 \
--apiserver-bind-port=6443 \
--image-repository="registry.aliyuncs.com/google_containers" \
--kubernetes-version="v1.20.4" \
--pod-network-cidr="10.244.0.0/16"
# Output
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.202.71:6443 --token k1qppu.cnlypnu9krd9b6k1 \
--discovery-token-ca-cert-hash sha256:1dcf1d358ca12d7b042809cd47999df7fcd143a9f583b1c3a880d5a72191d783
# Run the commands above
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Run on each worker node
kubeadm join 192.168.202.71:6443 --token k1qppu.cnlypnu9krd9b6k1 \
--discovery-token-ca-cert-hash sha256:1dcf1d358ca12d7b042809cd47999df7fcd143a9f583b1c3a880d5a72191d783
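The bootstrap token expires after 24 hours by default; when adding a node later, print a fresh join command on the master:
[root@node-1 ~]# kubeadm token create --print-join-command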
# Verify; NotReady is expected until a network plugin is installed in the next step
[root@node-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node-1 NotReady control-plane,master 17m v1.20.4
node-2 NotReady <none> 6m59s v1.20.4
node-3 NotReady <none> 6m59s v1.20.4
node-4 NotReady <none> 6m59s v1.20.4
node-5 NotReady <none> 6m59s v1.20.4
4. Install the Flannel network plugin
The kube-flannel.yml manifest comes from the flannel-io/flannel repository on GitHub; the copy used here is reproduced in full below.
[root@node-1 ~]# kubectl apply -f kube-flannel.yml
# The node status changes to Ready
[root@node-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node-1 Ready control-plane,master 35m v1.20.4
node-2 Ready <none> 24m v1.20.4
node-3 Ready <none> 24m v1.20.4
node-4 Ready <none> 24m v1.20.4
node-5 Ready <none> 24m v1.20.4
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.1-rc2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.1-rc2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
5. Command-line completion
[root@node-1 ~]# kubectl completion bash > /etc/profile.d/kubectl.sh
[root@node-1 ~]# source /etc/profile.d/kubectl.sh
[root@node-1 ~]# echo "source /etc/profile.d/kubectl.sh" >> .bashrc
[root@node-1 ~]# cat .bashrc
# .bashrc
# User specific aliases and functions
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
source /etc/profile.d/kubectl.sh
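The same mechanism works for kubeadm if its subcommands should complete too; an optional sketch:
[root@node-1 ~]# kubeadm completion bash > /etc/profile.d/kubeadm.sh
[root@node-1 ~]# source /etc/profile.d/kubeadm.sh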
6. Check cluster health and switch kube-proxy to IPVS mode
[root@node-1 ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-7f89b7bc75-2cmfc 1/1 Running 0 46m 10.244.3.2 node-4 <none> <none>
coredns-7f89b7bc75-59zft 1/1 Running 0 46m 10.244.1.2 node-2 <none> <none>
etcd-node-1 1/1 Running 0 46m 192.168.202.71 node-1 <none> <none>
kube-apiserver-node-1 1/1 Running 0 46m 192.168.202.71 node-1 <none> <none>
kube-controller-manager-node-1 1/1 Running 0 46m 192.168.202.71 node-1 <none> <none>
kube-flannel-ds-4qq8g 1/1 Running 0 17m 192.168.202.72 node-2 <none> <none>
kube-flannel-ds-9pc7w 1/1 Running 0 17m 192.168.202.75 node-5 <none> <none>
kube-flannel-ds-hqkjv 1/1 Running 0 17m 192.168.202.74 node-4 <none> <none>
kube-flannel-ds-ncrh4 1/1 Running 0 17m 192.168.202.71 node-1 <none> <none>
kube-flannel-ds-qk77w 1/1 Running 0 17m 192.168.202.73 node-3 <none> <none>
kube-proxy-8qzg8 1/1 Running 0 35m 192.168.202.72 node-2 <none> <none>
kube-proxy-crklz 1/1 Running 0 35m 192.168.202.75 node-5 <none> <none>
kube-proxy-mb6n8 1/1 Running 0 35m 192.168.202.73 node-3 <none> <none>
kube-proxy-pwt6m 1/1 Running 0 46m 192.168.202.71 node-1 <none> <none>
kube-proxy-sdvnj 1/1 Running 0 35m 192.168.202.74 node-4 <none> <none>
kube-scheduler-node-1 1/1 Running 0 46m 192.168.202.71 node-1 <none> <none>
[root@node-1 ~]# kubectl get daemonsets.apps -A
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system kube-flannel-ds 5 5 5 5 5 <none> 17m
kube-system kube-proxy 5 5 5 5 5 kubernetes.io/os=linux 47m
[root@node-1 ~]# kubectl get service -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 47m
kube-system kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 47m
# Switch to IPVS mode
[root@node-1 ~]# kubectl edit configmap kube-proxy -n kube-system
ipvs:
  minSyncPeriod: 0s
  scheduler: ""
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs" # change this line
nodePortAddresses: null
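To verify the edit without reopening the editor:
[root@node-1 ~]# kubectl get configmap kube-proxy -n kube-system -o yaml | grep 'mode:'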
# Delete every kube-proxy pod; the DaemonSet recreates them with the new configuration
[root@node-1 ~]# for i in `kubectl get pods -n kube-system | grep kube-proxy | awk '{print $1}'`; do kubectl delete pod $i -n kube-system ;done
pod "kube-proxy-8qzg8" deleted
pod "kube-proxy-crklz" deleted
pod "kube-proxy-mb6n8" deleted
pod "kube-proxy-pwt6m" deleted
pod "kube-proxy-sdvnj" deleted
[root@node-1 ~]# kubectl get pods -n kube-system | grep kube-proxy
kube-proxy-7ng6v 1/1 Running 0 18s
kube-proxy-fvkgv 1/1 Running 0 13s
kube-proxy-gvr7t 1/1 Running 0 11s
kube-proxy-j4wxh 1/1 Running 0 17s
kube-proxy-jqpn9 1/1 Running 0 3s
[root@node-1 ~]# kubectl logs -n kube-system kube-proxy-7ng6v
I0304 01:15:04.995911 1 server_others.go:258] Using ipvs Proxier.
# Verify
[root@node-1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 192.168.202.71:6443 Masq 1 0 0
TCP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.3.2:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.1.2:9153 Masq 1 0 0
-> 10.244.3.2:9153 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.3.2:53 Masq 1 0 0
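kube-proxy also reports its active mode on the metrics address configured above (127.0.0.1:10249); on any node, the following should print ipvs:
[root@node-1 ~]# curl 127.0.0.1:10249/proxyMode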