CentOS7.9 kubernetes 1.25.0 docker 20.10.17

清理minikube安装环境

minikube delete:如果之前安装过 minikube,需要先执行 minikube delete 删除,否则配置文件路径相同会导致问题,同时会多出一个网卡及一条路由;如果 kubeadm 已经初始化过,需要执行 kubeadm reset 重置。

关闭防火墙

systemctl stop firewalld
systemctl disable firewalld

selinux关闭

setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

设置hostname

hostnamectl --static set-hostname master
或者修改/etc/hosts /etc/hostname

docker安装

yum install yum-utils
# 添加阿里云镜像
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# 安装docker
yum install -y docker-ce

# 启动docker
systemctl start docker

docker镜像改为国内地址

vim /etc/docker/daemon.json
{
        "exec-opts":["native.cgroupdriver=systemd"],
        "registry-mirrors" : [
            "http://ovfftd6p.mirror.aliyuncs.com",
            "http://hub-mirror.c.163.com",
            "https://zt8w2oro.mirror.aliyuncs.com",
            "http://registry.docker-cn.com",
            "http://docker.mirrors.ustc.edu.cn"
         ]
}

#重启docker
systemctl restart docker

关闭交换内存

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

修改内核参数

# Kernel settings required by kubeadm preflight checks: bridged traffic must
# pass through iptables, and IPv4 forwarding must be enabled.
# Load br_netfilter FIRST — until the module is loaded the net.bridge.* keys
# do not exist and `sysctl --system` fails on them (the very error worked
# around later in this document).
modprobe br_netfilter
lsmod | grep br_netfilter

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# Apply every sysctl config file now (settings also persist across reboots)
sysctl --system

修改kubernetes镜像为国内镜像

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
# gpgcheck/repo_gpgcheck 设置为1会校验报错,这里关闭校验
# 注意:.repo 文件不支持行尾注释,注释必须单独成行,否则会被当作取值的一部分
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

安装 kubelet kubeadm kubectl

# 安装kubelet 等组件
yum install -y kubelet kubeadm kubectl

containerd安装 修改镜像地址

/etc/containerd/config.toml k8s.gcr.io
错误:[kubelet-check] Initial timeout of 40s passed

sed -i 's/k8s.gcr.io/registry.aliyuncs.com\/google_containers/' /etc/containerd/config.toml

安装docker会自动安装,先删除/etc/containerd/config.toml 然后重启containerd systemctl restart containerd

下载镜像并修改

 kubeadm config images list
registry.k8s.io/kube-apiserver:v1.25.0
registry.k8s.io/kube-controller-manager:v1.25.0
registry.k8s.io/kube-scheduler:v1.25.0
registry.k8s.io/kube-proxy:v1.25.0
registry.k8s.io/pause:3.8
registry.k8s.io/etcd:3.5.4-0
registry.k8s.io/coredns/coredns:v1.9.3

 docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.25.0

# Pull each image kubeadm needs from the Aliyun mirror, then re-tag it with
# the name kubeadm/kubelet actually looks for. Per the `kubeadm config images
# list` output above, Kubernetes 1.25 uses registry.k8s.io (NOT the old
# k8s.gcr.io), so that is the prefix we restore when tagging.
for i in $(kubeadm config images list); do
  imageName=${i#registry.k8s.io/}   # strip the registry.k8s.io/ prefix
  # The Aliyun mirror hosts coredns without the extra "coredns/" directory,
  # so drop that level for the pull but keep the full name for the tag.
  mirrorName=${imageName#coredns/}
  docker pull "registry.aliyuncs.com/google_containers/${mirrorName}"
  docker tag "registry.aliyuncs.com/google_containers/${mirrorName}" "registry.k8s.io/${imageName}"
  docker rmi "registry.aliyuncs.com/google_containers/${mirrorName}"
done
# coredns 需要去掉一层目录,单独拉取后再打回 registry.k8s.io/coredns/coredns 的标签
docker pull registry.aliyuncs.com/google_containers/coredns:v1.9.3
docker tag registry.aliyuncs.com/google_containers/coredns:v1.9.3 registry.k8s.io/coredns/coredns:v1.9.3

集群初始化

# k8s启动命令
kubeadm init --image-repository=registry.aliyuncs.com/google_containers --apiserver-advertise-address=10.168.11.143 --kubernetes-version=v1.25.0 --pod-network-cidr=10.244.0.0/16

报错 image.png

rm -rf /etc/containerd/config.toml
systemctl restart containerd

# 解决 bridge-nf-call-iptables does not exist 的报错
modprobe br_netfilter
# 开启内核转发(/proc 下的值无法用 vim 修改,只能 echo 写入;echo 的方式重启后失效,
# 建议同时在 /etc/sysctl.d/k8s.conf 中加入 net.ipv4.ip_forward = 1 做持久化)
echo 1 > /proc/sys/net/ipv4/ip_forward

image.png

containerd config default > /etc/containerd/config.toml
sed -i 's/k8s.gcr.io/registry.aliyuncs.com\/google_containers/' /etc/containerd/config.toml
systemctl restart containerd

初始化集群

kubeadm init --image-repository=registry.aliyuncs.com/google_containers --apiserver-advertise-address=192.168.2.100 --kubernetes-version=v1.25.0 --pod-network-cidr=10.244.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

解除master部署pod的限制

# master节点设置可以作为工作节点
kubectl taint nodes --all node-role.kubernetes.io/master-
# 这个是我部署时遇到taint错误执行的,各自根据情况修改
kubectl taint nodes --all node-role.kubernetes.io/control-plane-

上面命令可能执行失败,遇到错误时执行就好了 参考: k8s搭建单机集群(1.24之前) centos7部署单机集群 centos7部署单机k8s

验证是否可用

kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   13h   v1.25.0
# 上面status为NotReady则有问题,接下来排查问题
kubectl describe nodes
# 其中有一部分为
# container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
# 说明是cni(容器网络)插件的问题
kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-c676cc86f-8hf8k          0/1     Pending   0          13h
coredns-c676cc86f-tmc5d          0/1     Pending   0          13h
etcd-master                      1/1     Running   0          13h
kube-apiserver-master            1/1     Running   0          13h
kube-controller-manager-master   1/1     Running   0          13h
kube-proxy-xn5dk                 1/1     Running   0          13h
kube-scheduler-master            1/1     Running   0          13h
# 可以看到coredns都是pending
网络好可以执行下面命令下载(注意要用 raw.githubusercontent.com 的原始文件地址,github.com/.../blob/... 下载到的是网页而不是 yaml)
curl -O https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
或者直接执行(清单中资源自带 kube-flannel 命名空间,不要再加 -n 参数)
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

[root@master srv]# kubectl create ns kube-flannel
[root@master srv]# kubectl -n kube-system apply -f kube-flannel.yml 
namespace/kube-flannel unchanged
clusterrole.rbac.authorization.k8s.io/flannel unchanged
clusterrolebinding.rbac.authorization.k8s.io/flannel unchanged
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.
# 上面的报错是因为命令里指定了 -n kube-system,而清单中的资源声明在 kube-flannel 命名空间;
# 去掉 -n 参数直接执行 kubectl apply -f kube-flannel.yml 即可(也无需提前 create ns,清单会自动创建命名空间)

# 再次验证
[root@master srv]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   15h   v1.25.0
 kubectl get pods -o wide -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE   IP              NODE     NOMINATED NODE   READINESS GATES
coredns-c676cc86f-8hf8k          1/1     Running   0          27h   10.244.0.3      master   <none>           <none>
coredns-c676cc86f-tmc5d          1/1     Running   0          27h   10.244.0.2      master   <none>           <none>
etcd-master                      1/1     Running   0          27h   192.168.2.100   master   <none>           <none>
kube-apiserver-master            1/1     Running   0          27h   192.168.2.100   master   <none>           <none>
kube-controller-manager-master   1/1     Running   0          27h   192.168.2.100   master   <none>           <none>
kube-flannel-ds-fkmqc            1/1     Running   0          13h   192.168.2.100   master   <none>           <none>
kube-proxy-xn5dk                 1/1     Running   0          27h   192.168.2.100   master   <none>           <none>
kube-scheduler-master            1/1     Running   0          27h   192.168.2.100   master   <none>           <none>

网络不好可以复制下面kube-flannel.yml

flannel网络安装

kube-flannel官方地址 version:0.19.2

# flannel v0.19.2 manifest (copy of the upstream kube-flannel.yml).
---
# Dedicated namespace; the pod-security label permits privileged pods in it.
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
# RBAC: flannel reads pods, lists/watches nodes and patches node status.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
# Bind the ClusterRole above to flannel's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
# CNI and flannel configuration, mounted into the DaemonSet pods below.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  # Installed to /etc/cni/net.d/10-flannel.conflist by the install-cni init container.
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  # "Network" must match kubeadm's --pod-network-cidr (10.244.0.0/16 in this document).
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
# Runs one flannel pod on every Linux node (host network, tolerates NoSchedule taints).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      # Copies the flannel CNI plugin binary onto the host (/opt/cni/bin).
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      # Copies the CNI config from the ConfigMap onto the host (/etc/cni/net.d).
      - name: install-cni
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      # The flanneld daemon itself.
      - name: kube-flannel
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

图形化界面kuboard

# Kuboard v3 web UI, run as a single Docker container on the host.
# Port 80 serves the web console; port 10081 is the agent server port
# (must match KUBOARD_AGENT_SERVER_TCP_PORT).
# KUBOARD_ENDPOINT must be an address reachable from the cluster nodes —
# "内网IP" is a placeholder: replace it with the host's real internal IP.
# /root/kuboard-data persists Kuboard's data across container restarts.
sudo docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 80:80/tcp \
  -p 10081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://内网IP:80" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
  -v /root/kuboard-data:/data \
  eipwork/kuboard:v3

kuboard(k8s学习)

image.png学习交流群