Lab environment
OS: openEuler 24.03
Machines:
192.168.0.61 jichao61 master
192.168.0.62 jichao62 master
192.168.0.63 jichao63 master
192.168.0.64 jichao64 worker
192.168.0.65 jichao65 worker
Complete the system initialization steps first.
# Enable IPv4 forwarding, otherwise cluster initialization will fail
sysctl -w net.ipv4.ip_forward=1
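Note that sysctl -w only lasts until reboot; to persist the setting, drop it into /etc/sysctl.d/ (the file name below is my choice):
# Persist IPv4 forwarding across reboots
echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.d/99-k8s.conf
sysctl --system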
Install Docker [all]
Install docker-20.10.17 from the binary tarball
tar xf docker-*.tgz
cp docker/* /usr/bin/
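A quick sanity check that the binaries are in place (the daemon is not running yet, so only version queries will work):
dockerd --version
containerd --version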
# Create the containerd service file and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
# Prepare the docker service file (quote the heredoc delimiter so $MAINPID is not expanded by the shell)
cat > /etc/systemd/system/docker.service <<'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
# Prepare the docker socket file
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Create the docker group
groupadd docker
# Start docker
systemctl enable --now docker.socket && systemctl enable --now docker.service
# Verify
docker info
# Configure the Docker daemon; the cgroup driver must be systemd to match kubelet
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker"
}
EOF
systemctl restart docker
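Verify the cgroup driver took effect; it must be systemd to match the kubelet setting made later:
docker info | grep -i 'cgroup driver'
# Expected: Cgroup Driver: systemd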
Install the cri-dockerd shim [all]
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz
# Unpack cri-dockerd
tar -zxvf cri-dockerd-0.3.1.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
# Write the startup service file (quoted heredoc so $MAINPID is preserved)
cat > /usr/lib/systemd/system/cri-docker.service <<'EOF'
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Write the socket file
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Start cri-docker
systemctl daemon-reload
systemctl enable cri-docker --now
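Confirm the shim is running and its socket exists before pointing kubeadm at it:
systemctl is-active cri-docker
ls -l /var/run/cri-dockerd.sock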
Load balancing and high availability
Install keepalived and haproxy
Configure keepalived
Machines: jichao61, jichao62, jichao63
# Install keepalived and haproxy
yum -y install keepalived haproxy
# Note: the shipped sample config has to be renamed first
cd /etc/keepalived/
mv keepalived.conf.sample keepalived.conf
chmod 644 keepalived.conf
cp -p keepalived.conf keepalived.conf.bak
# jichao61
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    mcast_src_ip 192.168.0.61
    virtual_router_id 61
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.200
    }
    track_script {
        chk_apiserver
    }
}
EOF
service keepalived start
systemctl enable keepalived
# jichao62 (BACKUP with lower priority, so the VIP prefers jichao61 and fails back to it)
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    mcast_src_ip 192.168.0.62
    virtual_router_id 61
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.200
    }
    track_script {
        chk_apiserver
    }
}
EOF
service keepalived start
systemctl enable keepalived
# jichao63 (BACKUP, lowest priority)
cat >/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    mcast_src_ip 192.168.0.63
    virtual_router_id 61
    priority 98
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.0.200
    }
    track_script {
        chk_apiserver
    }
}
EOF
service keepalived start
systemctl enable keepalived
Health check script [3 nodes]
cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
# Fail over only if haproxy stays down for three consecutive checks
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
# keepalived must be able to execute the script
chmod +x /etc/keepalived/check_apiserver.sh
Check the VIP
ip addr
Test [61]
Test keepalived failover
ping 192.168.0.200
Stop keepalived on jichao61 and the VIP fails over to jichao62 / jichao63;
start keepalived on jichao61 again and the VIP automatically moves back.
service keepalived stop
service keepalived start
Check whether the VIP has moved back
ip addr
Configure haproxy [3 nodes]
Modify /etc/sysctl.conf (haproxy binds the VIP even on nodes that do not currently hold it, so non-local binds must be allowed)
vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind = 1
---
# Reload the configuration
sysctl -p
cd /etc/haproxy/
mv haproxy.cfg haproxy.cfg.bak
cat > /etc/haproxy/haproxy.cfg << EOF
global
    maxconn 2000
    ulimit-n 16384
    log 127.0.0.1 local0 err
    stats timeout 30s

defaults
    log global
    mode http
    option httplog
    timeout connect 5000
    timeout client 50000
    timeout server 50000
    timeout http-request 15s
    timeout http-keep-alive 15s

frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor

frontend k8s-master
    bind 192.168.0.200:16443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-masters

backend k8s-masters
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server jichao61 192.168.0.61:6443 check
    server jichao62 192.168.0.62:6443 check
    server jichao63 192.168.0.63:6443 check
EOF
service haproxy start
systemctl enable haproxy
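Verify on jichao61 that haproxy answers on the monitor frontend defined above:
curl http://127.0.0.1:33305/monitor
# Should return an HTTP 200 page while haproxy is healthy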
scp /etc/haproxy/haproxy.cfg root@jichao62:/etc/haproxy/
scp /etc/haproxy/haproxy.cfg root@jichao63:/etc/haproxy/
On jichao62 and jichao63, modify /etc/sysctl.conf the same way
sysctl -a | grep nonlocal
vim /etc/sysctl.conf
---
net.ipv4.ip_nonlocal_bind = 1
---
sysctl -p
service haproxy start
systemctl enable haproxy
Install Kubernetes
Configure the yum repository [all]
cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache
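Optionally list the versions available in the mirror before installing:
yum list kubeadm --showduplicates | sort -r | head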
Install Kubernetes 1.28.x
Install the latest version available in the repo
yum -y install kubeadm kubelet kubectl
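Confirm which versions were actually installed; the init step below assumes a 1.28.x kubeadm:
kubeadm version -o short
kubelet --version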
Configure kubelet
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
Enabling kubelet at boot is enough; since no config file has been generated yet, it will start automatically after cluster initialization.
systemctl enable kubelet
Prepare the Docker images
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.13 --cri-socket=unix:///var/run/cri-dockerd.sock
docker image ls
Initialize the cluster
# Double-check everything before running this
kubeadm init --control-plane-endpoint=192.168.0.200:16443 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.13 --service-cidr=10.10.0.0/16 --pod-network-cidr=10.244.0.0/16 --cri-socket unix:///var/run/cri-dockerd.sock
## Run on the first master (jichao61)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
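The first control plane should now respond; the node will stay NotReady until the network plugin is installed below:
kubectl get nodes
kubectl get pods -n kube-system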
Join the other masters to the cluster
## Machines 62 and 63 must finish all the prerequisite steps above before joining
# Sync the certificates from 61 to 62 and 63
scp -r /etc/kubernetes/pki jichao62:/etc/kubernetes/
scp -r /etc/kubernetes/pki jichao63:/etc/kubernetes/
Delete the unneeded certificates
On jichao62 and jichao63, delete the node-specific certificates that were just copied so they stay consistent with jichao61; kubeadm regenerates them during join.
cd /etc/kubernetes/pki/
rm -rf apiserver*
rm -rf etcd/peer.*
rm -rf etcd/server.*
Join the other two master nodes [62, 63]
kubeadm join 192.168.0.200:16443 --token dovays.3iv6vn8wgvvgyulv \
--discovery-token-ca-cert-hash sha256:ec1a2c53d36bd07a140d5dfd0114794c73f8447ebbbafa7daa40f1a5dfee4871 \
--control-plane --cri-socket unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join the worker nodes [64, 65]
kubeadm join 192.168.0.200:16443 --token dovays.3iv6vn8wgvvgyulv \
--discovery-token-ca-cert-hash sha256:ec1a2c53d36bd07a140d5dfd0114794c73f8447ebbbafa7daa40f1a5dfee4871 --cri-socket=unix:///var/run/cri-dockerd.sock
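The token and hash above come from this particular run, and a bootstrap token expires after 24 hours by default; if yours has expired, print a fresh join command on an existing master (remember to append the --cri-socket flag, and --control-plane for masters):
kubeadm token create --print-join-command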
Check cluster status
kubectl get nodes
Install the Calico network plugin
Edit the manifest
vim calico.yaml
# Search for the pod CIDR, and also update the node's network interface if needed (see the snippet below)
:/10.244
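The search target is the pod CIDR env entry; it has to be uncommented and must match the --pod-network-cidr passed to kubeadm init. To pin the NIC, Calico's IP_AUTODETECTION_METHOD can be set as well (ens160 here is an assumption based on the keepalived config):
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens160"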
Check the required images
cat calico.yaml | grep image
# Mine is the latest version
If your network connection is good you can apply it directly.
Deploy Calico
kubectl apply -f calico.yaml
kubectl get pod -n kube-system
# I applied it right away, so some pods may show restarts
Remove the taint from the master nodes [61, 62, 63]
By default Kubernetes does not schedule regular pods on master nodes because they carry a taint;
if you want the masters to also run some pods, the taint has to be removed.
kubectl describe node jichao61 | grep -i taint
kubectl describe node jichao62 | grep -i taint
kubectl describe node jichao63 | grep -i taint
# Remove the NoSchedule taint (the trailing '-' deletes it)
kubectl taint node jichao61 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node jichao62 node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint node jichao63 node-role.kubernetes.io/control-plane:NoSchedule-
Deploy metrics-server
kubectl apply -f components.yaml
kubectl top node
kubectl top pod -n kube-system
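If kubectl top fails with kubelet certificate errors, a common workaround is to add the insecure-TLS flag to the metrics-server container args in components.yaml (whether you need it depends on the manifest you downloaded, which is not shown here):
    spec:
      containers:
      - name: metrics-server
        args:
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP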
Deploy the dashboard
kubectl apply -f recommended.yaml
kubectl apply -f dashboard-user.yaml
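dashboard-user.yaml is not reproduced above; a minimal sketch that matches the admin-user account used below (the standard dashboard ServiceAccount pattern):
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard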
kubectl get pods -n kubernetes-dashboard
kubectl get svc -n kubernetes-dashboard
Create a token
kubectl get ns
kubectl create token admin-user -n kubernetes-dashboard
# Example output (your token will differ):
eyJhbGciOiJSUzI1NiIsImtpZCI6IjFCeHdGcTJyVWY4TW0yaGZNeEk0b3lWZjE5LXBQOENFM1g5WnNYeTZyUGsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MTIwMDE4LCJpYXQiOjE3MjYxMTY0MTgsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNjJhYWUxMTAtNDg1OC00NGZiLTlmZGUtNDk0ODA2ODQyYzNjIn19LCJuYmYiOjE3MjYxMTY0MTgsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.j7lf8LQQzUD0REb6uFYNyRlkxzHNvU8mr3dGvG7X6QNclDMFbiynE6FskM4xw0tBuhtRjPdY3Qh7yMJ8XmniEpADfjZgVmEbsow6ZdrNKAPRMTJcGSCCD_wYsoHH24VLVCR_QNQbam_Wbf-kc1FlWRVzU4fmfWffdK_ToRNMuiW5i_Q5rBl_Csf4SJPj-itSwqR5D5Rsuj9xokJl1lWqA18QVcciGLMsUqQuoiz6rZx1NmaMoIINIcGpZZSQsliylJi9s2VSxMOmys9VywX79sgtasApRoyNSahjK-MRa4MgaA2qpfeBUyEfbD1bVxwv5EdPnsBVMRYZ8eu_LbA7fQ
Log in to the dashboard
Browse the resources
Test
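nginx-web.yaml is not included above; a minimal sketch consistent with the test (the names, replica count, image, and NodePort are all assumptions):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-web
  template:
    metadata:
      labels:
        app: nginx-web
    spec:
      containers:
      - name: nginx          # hypothetical container name
        image: nginx:latest
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
spec:
  type: NodePort
  selector:
    app: nginx-web
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080        # assumed NodePort; any port in 30000-32767 works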
kubectl apply -f nginx-web.yaml
kubectl get pods,svc -o wide
Any node can serve the page, as long as you use the correct NodePort.