Offline installation of k8s v1.25.3 with kubeadm, using containerd as the runtime

Preparation

Versions

CentOS 7, kernel 3.10.0-1160.el7.x86_64 # uname -r

kubeadm v1.25.3 # kubeadm version

kubelet v1.25.3 # kubelet --version

kubectl v1.25.3 # kubectl version

containerd v1.6.9 # ctr version

Image preparation

On an Internet-connected host, install kubeadm and containerd; the mirror registry used here must match the one passed to kubeadm init later.

# List the required images; --image-repository points at the mirror registry
kubeadm config images list --image-repository registry.aliyuncs.com/google_containers

# Pull the images; --image-repository points at the mirror registry
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers

# Check the sandbox image containerd needs
containerd config default | grep sandbox_image
    sandbox_image = "registry.k8s.io/pause:3.6"

# Check the images flannel needs
cat kube-flannel.yml | grep image
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1

Image list

These images can be exported with docker save (see the sketch after the list), but must be imported with ctr -n k8s.io.

registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.25.3
registry.aliyuncs.com/google_containers/pause:3.8
registry.aliyuncs.com/google_containers/etcd:3.5.4-0
registry.aliyuncs.com/google_containers/coredns:v1.9.3

registry.k8s.io/pause:3.6

docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1
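On the Internet-connected host, the list above can be exported with a docker pull/save loop like this sketch (tar filenames are arbitrary; registry.k8s.io/pause:3.6 may itself require the mirror-and-retag approach shown later if registry.k8s.io is unreachable):

# Pull and export every image needed offline (a sketch; run on the online host)
for img in \
  registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.3 \
  registry.aliyuncs.com/google_containers/kube-controller-manager:v1.25.3 \
  registry.aliyuncs.com/google_containers/kube-scheduler:v1.25.3 \
  registry.aliyuncs.com/google_containers/kube-proxy:v1.25.3 \
  registry.aliyuncs.com/google_containers/pause:3.8 \
  registry.aliyuncs.com/google_containers/etcd:3.5.4-0 \
  registry.aliyuncs.com/google_containers/coredns:v1.9.3 \
  registry.k8s.io/pause:3.6 \
  docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 \
  docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1; do
  docker pull "$img"
  # e.g. kube-apiserver:v1.25.3 -> kube-apiserver_v1.25.3.tar
  docker save "$img" -o "$(basename "$img" | tr ':' '_').tar"
done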

RPM package preparation

RPM package list

kubeadm

kubelet

kubectl

ipset

ipvsadm

Download the RPMs and all their dependencies with repotrack, then build the yum metadata with createrepo.

repotrack -a x86_64 kubeadm kubelet kubectl ipset ipvsadm -p yumrepo/

createrepo yumrepo/

containerd binary package preparation

# containerd
wget https://github.com/containerd/containerd/releases/download/v1.6.9/containerd-1.6.9-linux-amd64.tar.gz

# containerd.service
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service

# runc
wget https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64

# CNI network plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz

# containerd client tool; like crictl, nerdctl's syntax matches the docker command. Optional.
wget https://github.com/containerd/nerdctl/releases/download/v1.0.0/nerdctl-1.0.0-linux-amd64.tar.gz

ingress-nginx 1.4.0 preparation

# Installation files
wget https://github.com/kubernetes/ingress-nginx/archive/refs/tags/controller-v1.4.0.tar.gz

# Image addresses referenced by the deployment
registry.k8s.io/ingress-nginx/controller:v1.4.0
registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343

# Or pull these mirror addresses instead, retag to the names above, then docker save (sketch below)
registry.aliyuncs.com/google_containers/nginx-ingress-controller:v1.4.0
registry.aliyuncs.com/google_containers/kube-webhook-certgen:v20220916-gd32f8c343
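On the online host, the mirror-pull, retag, save flow could look like this sketch (the tar filenames match the ones imported during deployment below):

docker pull registry.aliyuncs.com/google_containers/nginx-ingress-controller:v1.4.0
docker tag registry.aliyuncs.com/google_containers/nginx-ingress-controller:v1.4.0 \
  registry.k8s.io/ingress-nginx/controller:v1.4.0
docker save registry.k8s.io/ingress-nginx/controller:v1.4.0 -o controller_v1.4.0.tar

docker pull registry.aliyuncs.com/google_containers/kube-webhook-certgen:v20220916-gd32f8c343
docker tag registry.aliyuncs.com/google_containers/kube-webhook-certgen:v20220916-gd32f8c343 \
  registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343
docker save registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343 \
  -o kube-webhook-certgen_v20220916-gd32f8c343.tar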

Linux environment initialization

NIC configuration

[root@master ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens33 
DEVICE=ens33
ONBOOT=yes
BOOTPROTO=static
IPADDR=172.16.3.131
NETMASK=255.255.255.0
GATEWAY=172.16.3.254
DNS1=114.114.114.114
DNS2=8.8.8.8

# Disable NetworkManager, use the network service
systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl restart network

Upload the files to the server

File list

containerd/ (5 files)

images/ (10 files)

rpm/ (205 files)

kube-flannel.yml (1 file)

Configure a local yum repository

# local holds the RPMs from the CentOS ISO; k8s holds the RPMs needed for the k8s installation
mount /dev/cdrom /media/
mkdir /etc/yum.repos.d/backup
mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/backup

cat > /etc/yum.repos.d/local.repo << EOF
[centos7]
name=local
baseurl=file:///media/
enabled=1
gpgcheck=0

[centos7-k8s]
name=k8s
baseurl=file:///root/rpm/
enabled=1
gpgcheck=0
EOF

yum clean all && yum makecache && yum repolist
yum -y install net-tools vim

Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

Disable SELinux

setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

Disable swap

swapoff -a
sed -i '/swap/s/^/#/g' /etc/fstab

Set the hostname

hostnamectl set-hostname master
echo "$(ifconfig ens33 | sed -n '2p' | awk '{print $2}') $(hostname)" >> /etc/hosts

Kernel parameter tuning

# Load the modules
modprobe overlay
modprobe br_netfilter
lsmod |grep -e overlay -e br_netfilter

# Load the modules at boot
# br_netfilter makes iptables rules apply to bridged traffic; without it, pod-to-pod traffic through a Service on the same node breaks
cat > /etc/modules-load.d/k8s.conf << EOF
overlay
br_netfilter
EOF

# Tune the kernel parameters
# (bridge devices then also invoke the layer-3 iptables rules, including conntrack, when forwarding at layer 2)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl settings
sysctl --system
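A quick check that the values took effect:

# All three should print "... = 1"
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward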

Enable time synchronization

CentOS 7 uses chronyd for time synchronization by default. This is a single-node deployment, so nothing was configured; reference commands (and a config sketch) follow:

systemctl status chronyd
timedatectl
chronyc sources
vim /etc/chrony.conf
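If the offline network has an internal NTP server, a minimal change could look like this sketch (ntp.internal.example is a placeholder address):

# Point chrony at an internal NTP server instead of the default CentOS pool
sed -i 's/^server /#server /' /etc/chrony.conf
echo 'server ntp.internal.example iburst' >> /etc/chrony.conf
systemctl restart chronyd
chronyc sources -v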

Use the ipvs proxy mode for k8s Service load balancing

Note that Linux kernel 4.19 replaced nf_conntrack_ipv4 with nf_conntrack.
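A sketch that picks the module name by kernel version (on this CentOS 7 3.10 kernel it loads nf_conntrack_ipv4, matching the script below):

# Load nf_conntrack_ipv4 before kernel 4.19, nf_conntrack from 4.19 on
read -r maj min <<< "$(uname -r | awk -F. '{print $1, $2}')"
if (( maj < 4 || (maj == 4 && min < 19) )); then
  modprobe nf_conntrack_ipv4
else
  modprobe nf_conntrack
fi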

# Install
yum -y install ipset ipvsadm

# Enable the ipvs kernel modules
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

k8s v1.25.3 deployment

Install containerd

ctr is containerd's own CLI tool.

crictl is the dedicated CLI tool defined by the Kubernetes community for CRI runtimes.

# Install containerd
cd containerd
tar zxvf containerd-1.6.9-linux-amd64.tar.gz -C /usr/local/
cp containerd.service /usr/lib/systemd/system/
systemctl daemon-reload
systemctl enable --now containerd
ctr version
---
Client:
  Version:  v1.6.9
  Revision: 1c90a442489720eec95342e1789ee8a5e1b9536f
  Go version: go1.18.7

Server:
  Version:  v1.6.9
  Revision: 1c90a442489720eec95342e1789ee8a5e1b9536f
  UUID: 9492d653-76e1-4ba4-9adb-ddf3f55f127f


# Install runc
install -m 755 runc.amd64 /usr/local/sbin/runc
runc -v
---
runc version 1.1.4
commit: v1.1.4-0-g5fd4c4d1
spec: 1.0.2-dev
go: go1.17.10
libseccomp: 2.5.4


# Install the CNI plugins. They must be extracted to /opt/cni/bin, otherwise nerdctl reports a missing CNI plugin when mapping container ports
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.1.1.tgz


# Install nerdctl (optional)
tar zxvf nerdctl-1.0.0-linux-amd64.tar.gz
cp nerdctl /usr/local/bin/
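With nerdctl in place, docker-style commands work against containerd, e.g.:

nerdctl -n k8s.io images   # list images in containerd's k8s.io namespace
nerdctl -n k8s.io ps -a    # list containers, docker ps style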


####################################
##  Background; no action needed  ##
####################################
# Generate the default config
mkdir /etc/containerd/
containerd config default > /etc/containerd/config.toml

# Change the sandbox image to the mirror
sed -i "s#registry.k8s.io/pause#registry.aliyuncs.com/google_containers/pause#g" /etc/containerd/config.toml
# Configure the systemd cgroup driver for runc
# (the effective key in containerd 1.6 is SystemdCgroup under the runc options, not the deprecated systemd_cgroup)
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml

grep -iE 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml
systemctl restart containerd

Import images and retag

Since k8s 1.24 removed dockershim, containerd serves as the container runtime here, so images must be imported with ctr; docker load does not work.

# ctr is containerd's built-in tool and has namespaces; k8s images live in the k8s.io namespace by default, so specify it when importing
cd images
for i in *; do ctr -n k8s.io images import "$i"; done

# Retag the pause image to the name containerd expects
ctr -n k8s.io image tag registry.aliyuncs.com/google_containers/pause:3.6 registry.k8s.io/pause:3.6

## Watch the image names: wrong names surface during init as errors in systemctl status containerd kubelet
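A quick way to confirm the imports and names (expect both the aliyuncs entries and the retagged registry.k8s.io pause):

ctr -n k8s.io images ls -q | grep -e pause -e kube-apiserver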

kubeadm installation and configuration

yum -y install kubeadm kubelet kubectl

# Set kubelet's cgroup driver to systemd and the proxy mode to ipvs
cat > /etc/sysconfig/kubelet << EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF

# Configure kubelet to use containerd as the container runtime
# (append the flags to ExecStart in the unit file; the sed below does it non-interactively)
vim /usr/lib/systemd/system/kubelet.service
ExecStart=/usr/bin/kubelet --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock
sed -i '/^ExecStart/s#$# --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock#' /usr/lib/systemd/system/kubelet.service

# kubelet restarting in a loop at this point is normal; it has no cluster configuration yet
systemctl daemon-reload
systemctl restart kubelet
systemctl enable kubelet
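# While crash-looping, kubelet typically logs that /var/lib/kubelet/config.yaml
# is missing; that file only appears after kubeadm init
journalctl -u kubelet --no-pager -n 5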


# crictl needs its endpoints configured
crictl config runtime-endpoint unix:///run/containerd/containerd.sock
crictl config image-endpoint unix:///run/containerd/containerd.sock
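The two commands write /etc/crictl.yaml, which should now contain at least:

cat /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock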

kubeadm init (master installation)

kubeadm init \
  --apiserver-advertise-address=172.16.3.131 \
  --image-repository registry.aliyuncs.com/google_containers \
  --control-plane-endpoint=172.16.3.131 \
  --kubernetes-version v1.25.3 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --v=6

# apiserver-advertise-address: which interface/IP the master uses to talk to the other cluster nodes. With multiple interfaces, specify it explicitly; otherwise kubeadm picks the interface with the default gateway. This is the master node IP; remember to change it.

# image-repository: where images are pulled from (available since 1.13). The default is k8s.gcr.io; here it points at the Chinese mirror registry.aliyuncs.com/google_containers. On the offline network nothing is actually pulled; the name just has to match the imported images.

# kubernetes-version: pins the Kubernetes version. The default, stable-1, fetches the latest version number from https://dl.k8s.io/release/stable-1.txt; pinning it to v1.25.3 skips that network request.

# pod-network-cidr: the Pod network range. Kubernetes supports many network add-ons, each with its own requirements for this value; 10.244.0.0/16 is used because the stock flannel manifest expects this CIDR.

# control-plane-endpoint: a shared endpoint for the control plane. Instead of an IP you can map a DNS name such as cluster-endpoint to the master IP in /etc/hosts, pass --control-plane-endpoint=cluster-endpoint to kubeadm init and the same name to kubeadm join; later the name can be repointed at a load balancer for high availability. Here the master IP is used directly.

Log output:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 172.16.3.131:6443 --token 6usl6p.i8ztf34wezk31ddx \
	--discovery-token-ca-cert-hash sha256:c28548d135aade1e505ff7f06ba07354af2b0678f853a3d7119081fbd49c47bb \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.3.131:6443 --token 6usl6p.i8ztf34wezk31ddx \
	--discovery-token-ca-cert-hash sha256:c28548d135aade1e505ff7f06ba07354af2b0678f853a3d7119081fbd49c47bb
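The bootstrap token in the join command expires after 24 hours by default; a fresh join command can be printed on the master at any time:

kubeadm token create --print-join-command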

Install flannel v0.20.1

kubectl apply -f kube-flannel.yml
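flannel v0.20.1 deploys into the kube-flannel namespace; the node should turn Ready once its pod is running:

kubectl -n kube-flannel get pods
kubectl get nodes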

kubectl command auto-completion

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

Enable ipvs mode in k8s

kubectl -n kube-system edit cm kube-proxy
# change mode to ipvs
    kind: KubeProxyConfiguration
    metricsBindAddress: ""
    mode: "ipvs"

# Restart the kube-proxy pods
kubectl -n kube-system rollout restart ds kube-proxy

# Check the logs for "Using ipvs Proxier" (the pod name will differ)
kubectl -n kube-system logs kube-proxy-2fczv
I1107 16:03:34.312088       1 node.go:163] Successfully retrieved node IP: 172.16.3.131
I1107 16:03:34.312194       1 server_others.go:138] "Detected node IP" address="172.16.3.131"
I1107 16:03:34.332179       1 server_others.go:269] "Using ipvs Proxier"
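ipvsadm (installed earlier) can also confirm that kube-proxy actually programmed IPVS virtual servers for the Service CIDR:

ipvsadm -Ln | head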

Install and deploy ingress-nginx 1.4.0

# Import the images
ctr -n k8s.io images import controller_v1.4.0.tar
ctr -n k8s.io images import kube-webhook-certgen_v20220916-gd32f8c343.tar

# Process the yaml
tar zxvf controller-v1.4.0.tar.gz
cd ingress-nginx-controller-v1.4.0/deploy/static/provider/baremetal
# Delete the @sha256 digests from the image references, otherwise the pull fails with ErrImagePull (the digest no longer matches the retagged local image)
cat deploy.yaml | grep image
        image: registry.k8s.io/ingress-nginx/controller:v1.4.0
        imagePullPolicy: IfNotPresent
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343
        imagePullPolicy: IfNotPresent
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343
        imagePullPolicy: IfNotPresent

# Deploy
kubectl apply -f deploy.yaml

Since Kubernetes 1.19, Ingress uses apiVersion networking.k8s.io/v1. Syntax:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  rules:
  - host: nginx.ingress.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: ingress-nginx-controller
            port: 
              number: 80

pathType has three values:

ImplementationSpecific: matching is up to the IngressClass; it may be treated as a separate pathType or the same as Prefix or Exact.

Exact: matches the URL path exactly, case sensitively.

Prefix: matches on a URL path prefix split by /. Matching is case sensitive and done path element by path element.
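To verify end to end, assuming the example Ingress above is applied and the baremetal manifest's NodePort Service is in place, a test could look like this sketch (node IP from this document; the Host header must match the Ingress rule):

# Find the HTTP NodePort of the controller, then request through the Ingress host
NODE_PORT=$(kubectl -n ingress-nginx get svc ingress-nginx-controller \
  -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
curl -H 'Host: nginx.ingress.com' "http://172.16.3.131:${NODE_PORT}/"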