OS: CentOS 7.9, Kubernetes 1.25.0, Docker 20.10.17

Configure hostname and hosts

# Set the hostnames of the two machines to master and node01
vim /etc/hostname

# Add to /etc/hosts on both machines
192.168.2.191 master
192.168.2.192 node01
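
As an optional convenience (not in the original), hostnamectl sets the name without editing the file, and a quick ping confirms the hosts entries resolve:
# Run on 192.168.2.191 and 192.168.2.192 respectively
hostnamectl set-hostname master
hostnamectl set-hostname node01
# Verify resolution from either machine
ping -c 1 node01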

Time synchronization

# Start chronyd on both machines
systemctl start chronyd
# Enable it at boot
systemctl enable chronyd
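
To confirm the clocks are actually syncing, an optional check:
# List the NTP sources chronyd is using
chronyc sources
# Overall clock and NTP status
timedatectl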

Disable the firewall

# Stop firewalld
systemctl stop firewalld
# Disable it at boot
systemctl disable firewalld
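
A quick check that the firewall is really off (optional):
firewall-cmd --state            # should print: not running
systemctl is-enabled firewalld  # should print: disabled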

Disable SELinux

# May be unnecessary on some machines; an error here can be ignored
setenforce 0
# Disable SELinux permanently (takes effect after reboot)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
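
Verify (should report Permissive now, and stay disabled after the next reboot):
getenforce
grep ^SELINUX= /etc/selinux/config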

Docker

Installation

yum install -y yum-utils
# Add the Aliyun mirror repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce

# Start Docker
systemctl start docker
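
The guide enables kubelet at boot but not Docker; since kubelet needs the runtime after a reboot, enabling it too is advisable:
systemctl enable docker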

Modify the configuration

vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "http://ovfftd6p.mirror.aliyuncs.com",
    "http://hub-mirror.c.163.com",
    "https://zt8w2oro.mirror.aliyuncs.com",
    "http://registry.docker-cn.com",
    "http://docker.mirrors.ustc.edu.cn"
  ]
}

# Restart Docker
systemctl restart docker
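
Confirm the cgroup driver change took effect; kubelet 1.25 defaults to systemd, so the two must match:
docker info | grep -i 'cgroup driver'   # should print: Cgroup Driver: systemd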

Disable swap

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
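
Verify swap is gone (both should show nothing / zero):
swapon --show
free -h | grep -i swap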

Adjust kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

# Load br_netfilter before applying, otherwise the net.bridge.* keys are not yet available
modprobe br_netfilter
sysctl --system
lsmod | grep br_netfilter
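
modprobe does not persist across reboots; to reload the module automatically, a common addition (not in the original):
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF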

Reset the containerd configuration

# Reset to the default configuration
containerd config default > /etc/containerd/config.toml
# Point the sandbox (pause) image at a domestic mirror
sed -i 's/k8s.gcr.io/registry.aliyuncs.com\/google_containers/' /etc/containerd/config.toml
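
Restart containerd so the new configuration takes effect:
systemctl restart containerd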

Configure a domestic Kubernetes yum repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
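
Optionally refresh the cache and confirm the repo serves the expected packages:
yum makecache fast
yum list --showduplicates kubeadm | tail -n 5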

Install kubeadm, kubelet, and kubectl

yum install -y kubeadm kubelet kubectl
# Check the version
kubectl version
# Enable kubelet at boot
systemctl enable kubelet
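
The command above installs the latest packages in the repo; if the repo has moved past 1.25.0, pin the versions so they match this guide:
yum install -y kubeadm-1.25.0 kubelet-1.25.0 kubectl-1.25.0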

Pull the component images (run on the master node)

kubeadm config images list
registry.k8s.io/kube-apiserver:v1.25.0
registry.k8s.io/kube-controller-manager:v1.25.0
registry.k8s.io/kube-scheduler:v1.25.0
registry.k8s.io/kube-proxy:v1.25.0
registry.k8s.io/pause:3.8
registry.k8s.io/etcd:3.5.4-0
registry.k8s.io/coredns/coredns:v1.9.3

for i in $(kubeadm config images list); do
  # Strip everything up to the last '/' (e.g. registry.k8s.io/ or registry.k8s.io/coredns/)
  imageName=${i##*/}
  # Pull from the Aliyun mirror, retag to the name kubeadm expects, then drop the mirror tag
  docker pull registry.aliyuncs.com/google_containers/$imageName
  docker tag registry.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
  docker rmi registry.aliyuncs.com/google_containers/$imageName
done

# Verify everything was pulled and retagged
docker images 

REPOSITORY                           TAG       IMAGE ID       CREATED        SIZE
k8s.gcr.io/kube-apiserver            v1.25.0   4d2edfd10d3e   2 weeks ago    128MB
k8s.gcr.io/kube-controller-manager   v1.25.0   1a54c86c03a6   2 weeks ago    117MB
k8s.gcr.io/kube-scheduler            v1.25.0   bef2cf311509   2 weeks ago    50.6MB
k8s.gcr.io/kube-proxy                v1.25.0   58a9a0c6d96f   2 weeks ago    61.7MB
k8s.gcr.io/pause                     3.8       4873874c08ef   2 months ago   711kB
k8s.gcr.io/etcd                      3.5.4-0   a8a176a5d5d6   3 months ago   300MB
k8s.gcr.io/coredns                   v1.9.3    5185b96f0bec   3 months ago   48.8MB

Initialize the cluster (run on the master node)

kubeadm init --image-repository=registry.aliyuncs.com/google_containers --apiserver-advertise-address=192.168.2.191 --kubernetes-version=v1.25.0 --pod-network-cidr=10.244.0.0/16

# Output on success
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.191:6443 --token 2gu197.0bnvkd4aa97im2gf \
	--discovery-token-ca-cert-hash sha256:eae16427f790e869106fd556b273d1229a5ca214dfb30d0fa3c1c6602676d1bc 
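
The token in the output expires after 24 hours; if a node joins later, regenerate the full join command on the master:
kubeadm token create --print-join-command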

Join the worker (run on the node)

kubeadm join 192.168.2.191:6443 --token 2gu197.0bnvkd4aa97im2gf \
	--discovery-token-ca-cert-hash sha256:eae16427f790e869106fd556b273d1229a5ca214dfb30d0fa3c1c6602676d1bc 

# Successful output
kubeadm join 192.168.2.191:6443 --token 2gu197.0bnvkd4aa97im2gf --discovery-token-ca-cert-hash sha256:eae16427f790e869106fd556b273d1229a5ca214dfb30d0fa3c1c6602676d1bc 
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

Verify the installation

# Run on the master node
kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   24m   v1.25.0
node01   NotReady   <none>          10m   v1.25.0
[root@master flanel]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-c676cc86f-d92mm          0/1     Pending   0          24m
coredns-c676cc86f-p6gnz          0/1     Pending   0          24m
etcd-master                      1/1     Running   0          24m
kube-apiserver-master            1/1     Running   0          24m
kube-controller-manager-master   1/1     Running   0          24m
kube-proxy-jw4hz                 1/1     Running   0          24m
kube-proxy-k6866                 1/1     Running   0          10m
kube-scheduler-master            1/1     Running   0          24m
# A NotReady status above means something is wrong; troubleshoot as follows
kubectl describe nodes
# Part of the output reads:
# container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
# This points at the CNI (container network) plugin
kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-c676cc86f-8hf8k          0/1     Pending   0          13h
coredns-c676cc86f-tmc5d          0/1     Pending   0          13h
etcd-master                      1/1     Running   0          13h
kube-apiserver-master            1/1     Running   0          13h
kube-controller-manager-master   1/1     Running   0          13h
kube-proxy-xn5dk                 1/1     Running   0          13h
kube-scheduler-master            1/1     Running   0          13h
# Note that both coredns pods are Pending
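
The kubelet log shows the same CNI complaint, an optional cross-check:
journalctl -u kubelet --no-pager | grep -i cni | tail -n 3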

Install the flannel network plugin

The official kube-flannel manifest (flannel-io/flannel, Documentation/kube-flannel.yml):

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

Apply and verify

If the network allows, download the manifest (use the raw URL; the github.com blob page returns HTML, not YAML):
curl -O https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
Or apply it directly; the manifest declares its own kube-flannel namespace, so no -n flag is needed:
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

[root@master srv]# kubectl create ns kube-flannel
[root@master srv]# kubectl -n kube-system apply -f kube-flannel.yml 
namespace/kube-flannel unchanged
clusterrole.rbac.authorization.k8s.io/flannel unchanged
clusterrolebinding.rbac.authorization.k8s.io/flannel unchanged
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.
the namespace from the provided object "kube-flannel" does not match the namespace "kube-system". You must pass '--namespace=kube-flannel' to perform this operation.

# The namespace mismatch errors above come from the stray -n kube-system flag; rerunning a plain
# 'kubectl apply -f kube-flannel.yml' creates the remaining resources. Verify again:
[root@master srv]# kubectl get pod -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-jr7sb   1/1     Running   0          9h
kube-flannel-ds-kfhnv   1/1     Running   0          9h
[root@master flanel]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   9h    v1.25.0
node01   Ready    <none>          9h    v1.25.0

kubectl get pods -n kube-system -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP              NODE     NOMINATED NODE   READINESS GATES
coredns-c676cc86f-d92mm          1/1     Running   0          11h   10.244.0.2      master   <none>           <none>
coredns-c676cc86f-p6gnz          1/1     Running   0          11h   10.244.0.3      master   <none>           <none>
etcd-master                      1/1     Running   0          11h   192.168.2.191   master   <none>           <none>
kube-apiserver-master            1/1     Running   0          11h   192.168.2.191   master   <none>           <none>
kube-controller-manager-master   1/1     Running   0          11h   192.168.2.191   master   <none>           <none>
kube-proxy-jw4hz                 1/1     Running   0          11h   192.168.2.191   master   <none>           <none>
kube-proxy-k6866                 1/1     Running   0          11h   192.168.2.192   node01   <none>           <none>
kube-scheduler-master            1/1     Running   0          11h   192.168.2.191   master   <none>           <none>

Install the Kuboard web UI

Install Kuboard with the built-in user store

docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 80:80/tcp \
  -p 10081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://192.168.2.192:80" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
  -v /root/kuboard-data:/data \
  eipwork/kuboard:v3

Set KUBOARD_ENDPOINT to the internal IP of the host running the container (192.168.2.192 here) and browse to that address to log in. Default username: admin, password: Kuboard123.

Create the demo namespace and deploy MySQL

# Create the namespace
kubectl create ns demo

demo-mysql-01.yml

apiVersion: apps/v1                             # apps API group version
kind: Deployment                                # Deployment controller; manages Pods via a ReplicaSet
metadata:
  name: mysql                                   # Deployment name, unique within the namespace
  namespace: demo                               # must match the -n demo flag used below
  labels:
    app: mysql
spec:
  replicas: 1                                   # desired number of Pod replicas
  selector:
    matchLabels:                                # label selector for the ReplicaSet
      app: mysql                                # matching Pods carry this label
  strategy:                                     # upgrade strategy
    type: RollingUpdate                         # rolling update, Pods replaced gradually
  template:                                     # template the Pod replicas are created from
    metadata:
      labels:
        app: mysql                              # Pod label; must match the selector above
    spec:
      # nodeName: k8s-worker01                    # pin the Pod to a specific node
      containers:                               # container definitions for the Pod
        - name: mysql                           # container name
          image: mysql:8.0                      # Docker image for the container
          volumeMounts:                         # mount points inside the container
            - name: time-zone                   # mount name
              mountPath: /etc/localtime         # mount path inside the container; file or directory
            - name: mysql-data
              mountPath: /var/lib/mysql         # MySQL data directory inside the container
            - name: mysql-logs
              mountPath: /var/log/mysql         # MySQL log directory inside the container
          ports:
            - containerPort: 3306               # port exposed by the container
          env:                                  # environment variables injected into the container
            - name: MYSQL_ROOT_PASSWORD         # root password for MySQL
              value: "123456"
      volumes:                                  # host volumes mounted into the containers
        - name: time-zone                       # volume name; must match the mount name above
          hostPath:
            path: /etc/localtime                # mounting the host's localtime gives the container the local time zone
        - name: mysql-data
          hostPath:
            path: /data/mysql/data              # host directory for MySQL data
        - name: mysql-logs
          hostPath:
            path: /data/mysql/logs              # host directory for MySQL logs

Run it on k8s

kubectl create -f demo-mysql-01.yml -n demo
# Check pod status
kubectl -n demo get pods
[root@master mysql]# kubectl get pods -n demo
NAME                     READY   STATUS              RESTARTS   AGE
mysql-78f4fd7f9c-llnrv   0/1     ContainerCreating   0          16s

# Inspect the pod in detail
kubectl -n demo describe pod
Part of the output:
Events:
  Type    Reason     Age    From               Message
  ----    ------     ----   ----               -------
  Normal  Scheduled  3m22s  default-scheduler  Successfully assigned demo/mysql-78f4fd7f9c-llnrv to node01
  Normal  Pulling    3m21s  kubelet            Pulling image "mysql:8.0"
  
The image is still being pulled; wait a while and check again:
kubectl get pods -n demo
NAME                     READY   STATUS    RESTARTS   AGE
mysql-78f4fd7f9c-llnrv   1/1     Running   0          6m7s
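
The manifest defines no Service, so other pods cannot reach MySQL by a stable DNS name yet; a minimal sketch using kubectl expose (the Service name "mysql" is my choice):
kubectl -n demo expose deployment mysql --port=3306 --target-port=3306 --name=mysql
# In-cluster clients can then connect to mysql.demo.svc.cluster.local:3306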

Access MySQL

Command line

kubectl -n demo exec -it mysql-78f4fd7f9c-llnrv -- mysql -uroot -p

Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 10
Server version: 8.0.30 MySQL Community Server - GPL

Copyright (c) 2000, 2022, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> 
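
To connect from a workstation with a local client instead, port-forwarding is an option (assumes a mysql client is installed locally):
kubectl -n demo port-forward deploy/mysql 3306:3306
# In another terminal:
mysql -h 127.0.0.1 -P 3306 -uroot -p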

Kuboard web terminal

Kuboard's pod detail page also provides a web terminal, which can be used in place of kubectl exec above.

References

Kuboard official site