本文介绍如何使用 kubeadm 将 Kubernetes 集群从 v1.19.3 升级到 v1.20.0
1.查看k8s版本
kubectl version
kubeadm version
kubectl get nodes
2.备份集群(不需要备份就直接跳过)
可以直接从官网下载备份脚本,直接执行即可
$ mkdir -p /data
cd /data
git clone https://github.com/solomonxu/k8s-backup-restore.git
cd /data/k8s-backup-restore
./bin/k8s_backup.sh
3.升级集群
3.1查看kubeadm可用版本
yum list --showduplicates kubeadm --disableexcludes=kubernetes
3.2升级kubeadm
$ yum install -y kubeadm-1.20.0-0 --disableexcludes=kubernetes
3.3升级完成查看kubeadm版本
kubeadm version
[root@k8s-kubmark .kube]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.0", GitCommit:"af46c47ce925f4c4ad5cc8d1fca46c7b77d13b38", GitTreeState:"clean", BuildDate:"2020-12-08T17:57:36Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}
3.4排空节点
kubectl cordon k8s-master
kubectl drain k8s-master --ignore-daemonsets
3.5执行升级计划
kubeadm upgrade plan
[root@k8s-kubmark .kube]# kubeadm upgrade apply v1.20.0
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade/version] You have chosen to change the cluster version to "v1.20.0"
[upgrade/versions] Cluster version: v1.19.3
[upgrade/versions] kubeadm version: v1.20.0
[upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y
[upgrade/prepull] Pulling images required for setting up a Kubernetes cluster
[upgrade/prepull] This might take a minute or two, depending on the speed of your internet connection
[upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull'
[upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.20.0"...
Static pod: kube-apiserver-k8s-kubmark hash: f045105e2d1249467e6d9f1edd60ac0a
Static pod: kube-controller-manager-k8s-kubmark hash: af652b9644a0ea152b6da25c4f55d209
Static pod: kube-scheduler-k8s-kubmark hash: ee4c94eb845abf1878fb3c4c489b1365
[upgrade/etcd] Upgrading to TLS for etcd
Static pod: etcd-k8s-kubmark hash: f3707d086c44b59697d68efb6256ba98
[upgrade/staticpods] Preparing for "etcd" upgrade
[upgrade/staticpods] Renewing etcd-server certificate
[upgrade/staticpods] Renewing etcd-peer certificate
[upgrade/staticpods] Renewing etcd-healthcheck-client certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2022-04-20-14-56-06/etcd.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: etcd-k8s-kubmark hash: f3707d086c44b59697d68efb6256ba98
Static pod: etcd-k8s-kubmark hash: f3707d086c44b59697d68efb6256ba98
Static pod: etcd-k8s-kubmark hash: 4336d47c59e0281cb869a92adde11714
[apiclient] Found 1 Pods for label selector component=etcd
[upgrade/staticpods] Component "etcd" upgraded successfully!
3.6升级可能遇到的问题
如果无法访问国外镜像仓库(k8s.gcr.io),导致升级所需镜像下载失败,可以先查看需要的镜像列表:
kubeadm config images list
[upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull'
可以下载国内的镜像然后重命名就可以了
docker pull tangxu/kube-apiserver:v1.20.0
docker pull tangxu/kube-controller-manager:v1.20.0
docker pull tangxu/kube-scheduler:v1.20.0
docker pull tangxu/kube-proxy:v1.20.0
docker pull tangxu/pause:3.2
docker pull tangxu/etcd:3.4.13-0
docker pull tangxu/coredns:1.7.0
docker tag tangxu/kube-apiserver:v1.20.0 k8s.gcr.io/kube-apiserver:v1.20.0
docker tag tangxu/kube-controller-manager:v1.20.0 k8s.gcr.io/kube-controller-manager:v1.20.0
docker tag tangxu/kube-scheduler:v1.20.0 k8s.gcr.io/kube-scheduler:v1.20.0
docker tag tangxu/kube-proxy:v1.20.0 k8s.gcr.io/kube-proxy:v1.20.0
docker tag tangxu/pause:3.2 k8s.gcr.io/pause:3.2
docker tag tangxu/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
docker tag tangxu/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
3.7升级kubelet跟kubectl
yum install -y kubelet-1.20.0-0 kubectl-1.20.0-0 --disableexcludes=kubernetes
重启kubelet
$ systemctl daemon-reload
$ systemctl restart kubelet
[root@k8s-kubmark .kube]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-kubmark Ready control-plane,master 22h v1.19.3 192.168.55.13 <none> CentOS Linux 7 (Core) 5.15.5-1.el7.elrepo.x86_64 docker://20.10.7
[root@k8s-kubmark .kube]# systemctl daemon-reload
[root@k8s-kubmark .kube]# systemctl restart kubelet
[root@k8s-kubmark .kube]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-kubmark Ready control-plane,master 22h v1.20.0 192.168.55.13 <none> CentOS Linux 7 (Core) 5.15.5-1.el7.elrepo.x86_64 docker://20.10.7
[root@k8s-kubmark .kube]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-kubmark Ready control-plane,master 22h v1.20.0 192.168.55.13 <none> CentOS Linux 7 (Core) 5.15.5-1.el7.elrepo.x86_64 docker://20.10.7
[root@k8s-kubmark .kube]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-kubmark Ready control-plane,master 22h v1.20.0 192.168.55.13 <none> CentOS Linux 7 (Core) 5.15.5-1.el7.elrepo.x86_64 docker://20.10.7
[root@k8s-kubmark .kube]#
4.升级节点
4.1升级kubeadm
yum install -y kubeadm-1.20.0-0 --disableexcludes=kubernetes
4.2设置节点不可调度并排空节点
$ kubectl cordon ecs-968f-0005
$ kubectl drain ecs-968f-0005 --ignore-daemonsets
4.3升级节点
kubeadm upgrade node
4.4升级kubelet
yum install -y kubelet-1.20.0-0 --disableexcludes=kubernetes
4.5重启kubelet
$ systemctl daemon-reload
$ systemctl restart kubelet
5.验证集群
[root@k8s-master hdf]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane,master 60d v1.20.0
k8s-node01 Ready <none> 60d v1.20.0