1. Environment preparation: 5 virtual machines
172.16.161.231 k8s-master231
172.16.161.232 k8s-master232
172.16.161.233 k8s-node233
172.16.161.234 k8s-node234
172.16.161.235 k8s-node235
2. Set hostnames and /etc/hosts
cat <<EOF > /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.161.231 k8s-master231
172.16.161.232 k8s-master232
172.16.161.233 k8s-node233
172.16.161.234 k8s-node234
172.16.161.235 k8s-node235
EOF

# Run the matching command below on each corresponding VM:
hostnamectl set-hostname k8s-master231
hostnamectl set-hostname k8s-master232
hostnamectl set-hostname k8s-node233
hostnamectl set-hostname k8s-node234
hostnamectl set-hostname k8s-node235
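# The /etc/hosts file written above must be identical on every node. A minimal sketch for pushing it
# out from one machine, assuming root SSH access to the other four IPs (not set up in this guide):
for h in 172.16.161.232 172.16.161.233 172.16.161.234 172.16.161.235; do
  scp /etc/hosts root@${h}:/etc/hosts    # copy the shared hosts file to each remaining node
done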
3. Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
4. Add the Aliyun yum repository
yum install -y wget
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
5. Add the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
6. Rebuild the yum cache
yum clean all
yum makecache
7. Synchronize the time
timedatectl set-timezone Asia/Shanghai
yum install ntpdate -y
ntpdate ntp1.aliyun.com
# Optionally add a scheduled job to keep the clock in sync; see the sketch below
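# A minimal cron sketch for periodic syncing, assuming root's crontab and the same Aliyun NTP
# server; the 30-minute interval is an arbitrary choice:
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1") | crontab -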
8. Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab  
swapoff -a
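# A quick check that swap is really off: the Swap line should now show 0 on every node
free -m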
9. Disable SELinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
10. Load required kernel modules persistently
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter

lsmod | grep br_netfilter
11. Adjust kernel parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# The sysctl command below applies the kernel parameters from k8s.conf so that bridged Pod traffic is handled by the host firewall:
# net.bridge.bridge-nf-call-iptables = 1: pass bridged IPv4 traffic through the iptables chains.
# net.bridge.bridge-nf-call-ip6tables = 1: pass bridged IPv6 traffic through the ip6tables chains.
# net.ipv4.ip_forward = 1: enable IPv4 forwarding.

sysctl -p /etc/sysctl.d/k8s.conf
# Reload all kernel parameter configuration files so the settings take effect.
sysctl --system
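# To confirm the three parameters are active, query them directly:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward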
12. Install IPVS modules
yum -y install ipset ipvsadm

cat <<EOF > /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
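# The /etc/sysconfig/modules mechanism is a legacy convention and is not guaranteed to run at boot
# on a systemd-based release; as a hedged alternative, the same modules can also be declared
# through modules-load.d exactly as in step 10:
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF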
13. Install base packages
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
14. Configure the cri-docker runtime (the cri-dockerd binary must be installed first; see the sketch below)
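# A minimal sketch of fetching the binary from the Mirantis/cri-dockerd GitHub releases page; the
# version number and archive layout below are assumptions, and cri-dockerd drives an existing
# Docker Engine installation, which is not covered in this guide:
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4.amd64.tgz   # pick a current release
tar zxvf cri-dockerd-0.3.4.amd64.tgz
install -m 0755 cri-dockerd/cri-dockerd /usr/bin/cri-dockerd    # the service unit below expects this path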
14.1 Create the cri-docker service unit
cat << 'EOF' >/usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
14.2 Create the cri-docker socket unit
cat << EOF > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF
14.3 Start cri-docker and enable it at boot
########## Copy the two unit files above to every other host in the cluster ##########

systemctl daemon-reload ; systemctl enable cri-docker --now
Created symlink from /etc/systemd/system/multi-user.target.wants/cri-docker.service to /usr/lib/systemd/system/cri-docker.service.

systemctl is-active cri-docker
--> active
15. Configure the containerd runtime
yum install -y containerd.io

mkdir -p /etc/containerd && \
containerd config default > /etc/containerd/config.toml && \
sed -i "s#k8s.gcr.io/pause#registry.aliyuncs.com/google_containers/pause#g"  /etc/containerd/config.toml  && \
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml  && \
sed -i "s#registry.k8s.io/pause:3.6#registry.aliyuncs.com/google_containers/pause:3.9#g" /etc/containerd/config.toml && \
sed -i '/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]' /etc/containerd/config.toml  && \
sed -i '/registry.mirrors."docker.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["http://hub-mirror.c.163.com"]' /etc/containerd/config.toml && \
sed -i '/hub-mirror.c.163.com"]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]' /etc/containerd/config.toml  && \
sed -i '/"k8s.gcr.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["http://registry.aliyuncs.com/google_containers"]' /etc/containerd/config.toml && \
sed -i '/google_containers"]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."122.9.142.69:5001"]' /etc/containerd/config.toml  && \
sed -i '/.registry.mirrors."122.9.142.69:5001"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["https://122.9.142.69:5001"]' /etc/containerd/config.toml && \
sed -i '/.registry.configs]/a\ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.configs."122.9.142.69:5001".tls]' /etc/containerd/config.toml  && \
sed -i '/."122.9.142.69:5001".tls]/a\ \ \ \ \ \ \ \ insecure_skip_verify = true' /etc/containerd/config.toml && \
sed -i '/insecure_skip_verify = true/a\ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.configs."122.9.142.69:5001".auth]' /etc/containerd/config.toml  && \
sed -i '/.configs."122.9.142.69:5001".auth]/a\ \ \ \ \ \ \ \ username = "admin"' /etc/containerd/config.toml && \
sed -i '/username = "admin"/a\ \ \ \ \ \ \ \ password = "liehooo"' /etc/containerd/config.toml && \
echo "===========restart containerd to reload config===========" && \
systemctl restart containerd
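# If the package did not already enable it, also make containerd start at boot
systemctl enable --now containerd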



wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.27.1/crictl-v1.27.1-linux-amd64.tar.gz
tar zxvf crictl-v1.27.1-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-v1.27.1-linux-amd64.tar.gz

cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 2
pull-image-on-create: false
EOF
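# With the endpoint configured, crictl can confirm that containerd is reachable; pulling the pause
# image also exercises the Aliyun registry settings written into config.toml above:
crictl info                                                     # prints the runtime status as JSON
crictl pull registry.aliyuncs.com/google_containers/pause:3.9   # verifies image pulls work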
16. Install Kubernetes components
#Kubernetes 1.21 can still use Docker directly as its runtime, so steps 14/15 may be skipped for that version
#yum install -y kubelet-1.21.10 kubeadm-1.21.10 kubectl-1.21.10 --disableexcludes=kubernetes
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable --now kubelet

#kubeadm: the tool used to bootstrap the Kubernetes cluster
#kubelet: installed on every node in the cluster; responsible for starting Pods
#kubectl: used to deploy and manage applications, inspect resources, and create, delete, and update components

#Check the installed version
kubeadm version
17. Generate the kubeadm configuration file (the commented fields must be adjusted to each VM's situation)
mkdir -p /root/kubernetes/
cd /root/kubernetes/
kubeadm config print init-defaults > kubeadm.yaml
vim /root/kubernetes/kubeadm.yaml


apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.161.231      # master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock      ## use unix:///var/run/cri-dockerd.sock when running with cri-dockerd
  imagePullPolicy: IfNotPresent
  name: k8s-master231      # control-plane node name
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers    # use the Aliyun image registry instead of the default
kind: ClusterConfiguration
kubernetesVersion: 1.27.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16      # Pod network CIDR
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---  # added
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---    # added
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
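
# Before touching the cluster, the configuration can be exercised with kubeadm's dry-run mode,
# which reports what would be done without changing the host:
kubeadm init --config /root/kubernetes/kubeadm.yaml --dry-run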
18. Pre-pull the apiserver and other required images
kubeadm config images pull --config=/root/kubernetes/kubeadm.yaml
19. Initialize the nodes
19-1. Initialize the master node (run only on the master host)
kubeadm init --config /root/kubernetes/kubeadm.yaml | tee /root/kubernetes/kubeadm-init.log

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

export KUBECONFIG=/etc/kubernetes/admin.conf
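
# Verify that the control plane came up; the node reports NotReady until the Calico plugin from step 20 is installed
kubectl get nodes
kubectl get pods -n kube-system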


#Add an additional master node  ## --control-plane
1. Copy the certificates from k8s-master231 to k8s-master232
ssh k8s-master232 "cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/"
scp /etc/kubernetes/pki/ca.crt k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key k8s-master232:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt k8s-master232:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key k8s-master232:/etc/kubernetes/pki/etcd/

2. Add controlPlaneEndpoint
kubectl -n kube-system edit cm kubeadm-config

kind: ClusterConfiguration
kubernetesVersion: v1.27.0
controlPlaneEndpoint: 172.16.161.231:6443    # add this line

3. Run the join command with --control-plane
kubeadm join 172.16.161.231:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:50634a67eacc9642109ff4f83ad2b67cfa47499a09f6e3e71a1612613441e5af --control-plane
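
# After the join succeeds, repeat the kubeconfig setup on k8s-master232 so kubectl works there as well
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config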
19-2. Initialize the worker nodes (run only on the node hosts)
##First run this on the master node to obtain the join command
kubeadm token create --print-join-command

kubeadm join 172.16.161.231:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:c02f54d9d78cb044a9bbb9190a4488f5b2a2ba4216d7379919a0adb71c9f4e49
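
# If a node has both containerd and cri-dockerd installed (as in steps 14 and 15), kubeadm detects
# multiple CRI sockets and refuses to pick one automatically; in that case append an explicit
# --cri-socket, as in this sketch:
kubeadm join 172.16.161.231:6443 --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash printed by the command above> \
  --cri-socket unix:///var/run/containerd/containerd.sock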
20. Install the Calico network plugin
##Download the calico.yaml manifest
curl https://docs.projectcalico.org/archive/v3.25/manifests/calico.yaml -O

##Modify two parameters in calico.yaml: set the interface to this host's actual NIC name and the CIDR to the podSubnet from kubeadm.yaml
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens32"
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"

##Apply the calico.yaml manifest
kubectl apply -f calico.yaml
##Check that all pods are running
kubectl get pods -A
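##Once all calico pods reach Running, the nodes should report Ready
kubectl get nodes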
21. Test case
vi nginx_replication.yaml

apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80


kubectl apply -f nginx_replication.yaml
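
# To confirm the deployment actually serves traffic, check the pods and optionally expose the
# controller as a NodePort service (the service name simply mirrors the RC name):
kubectl get pods -l app=nginx -o wide             # all three replicas should reach Running
kubectl expose rc nginx --port=80 --type=NodePort
kubectl get svc nginx                             # note the assigned NodePort, then curl <any-node-ip>:<NodePort>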