Generate the configuration files on m01, then distribute them to the nodes that need them. In this step we deploy three services on a single master node:

  • kube-apiserver
  • kube-controller-manager
  • kube-scheduler

Download the Kubernetes binaries

Run the following on m01.

## Create the working directories
mkdir -p /root/k8s/{app,ssl,config,service,bin,kubeconfig}
cd /root/k8s

## Download the Kubernetes server binary package
wget https://dl.k8s.io/v1.23.3/kubernetes-server-linux-amd64.tar.gz -O app/kubernetes-server.tar.gz

## Extract only the binaries needed
tar -xf app/kubernetes-server.tar.gz  --strip-components=3 -C bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
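
A quick sanity check that the extraction worked and the binaries run on this host (assuming the v1.23.3 package downloaded above):

## verify the extracted binaries
ls bin/
bin/kube-apiserver --version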

Generate the CA certificate

Create the script gen_ca_cert.sh

cat <<'EOF' | sudo tee gen_ca_cert.sh
#!/bin/bash
## Define the signing policy, which tells the CA software what kinds of certificates it may issue; the root certificate used for signing is generated below
cat > ca-config.json <<EOF1
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "peer": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF1

## Generate the CA certificate signing request (CSR) configuration
cat > ca-csr.json <<EOF2
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangDong",
            "ST": "GuangZhou",
            "O": "Kubernetes",
            "OU": "System"
        }
    ]
}
EOF2

## Generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ssl/ca
EOF

Run it:

bash -x gen_ca_cert.sh

## Generated in the ssl directory:
├── ca-key.pem
├── ca.pem
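
Optionally inspect the new CA with openssl to confirm the subject set in ca-csr.json and the validity window:

openssl x509 -in ssl/ca.pem -noout -subject -dates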

Configure kube-apiserver

Generate the certificates required by kube-apiserver

cat <<'EOF' | sudo tee gen_apiserver_cert.sh
#!/bin/bash
## -----------------------
## Generate the apiserver certificate and private key (used for communication between the apiserver and other Kubernetes components)
APISERVER_NAME=$1

cat > kube-apiserver-csr.json <<EOF1
{
    "CN": "kube-apiserver",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangDong",
            "ST": "GuangZhou",
            "O": "Kubernetes",
            "OU": "System"
        }
    ]
}
EOF1

cfssl gencert -ca=ssl/ca.pem -ca-key=ssl/ca-key.pem -config=ca-config.json \
	-hostname=${APISERVER_NAME} \
	-profile=peer kube-apiserver-csr.json | cfssljson -bare ssl/kube-apiserver

## Generate the aggregation-layer certificates for the apiserver (also used by metrics-server)
## First the CA signing request for the front-proxy CA
cat > front-proxy-ca-csr.json <<EOF2
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  },
  "ca": {
    "expiry": "876000h"
  }
}
EOF2

cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare ssl/front-proxy-ca

## Generate the client certificate request file
## The CN here must match the apiserver flag --requestheader-allowed-names=front-proxy-client
cat > front-proxy-client-csr.json <<EOF3
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF3

cfssl gencert -ca=ssl/front-proxy-ca.pem -ca-key=ssl/front-proxy-ca-key.pem -config=ca-config.json \
	-profile=peer front-proxy-client-csr.json | cfssljson -bare ssl/front-proxy-client

## Create the ServiceAccount signing key pair
## ServiceAccounts are one form of authentication: creating a ServiceAccount also creates a bound
## secret, and the token inside that secret is signed with this key
openssl genrsa -out ssl/sa.key 2048
openssl rsa -in ssl/sa.key -pubout -out ssl/sa.pub
EOF

Run it:

## bash gen_apiserver_cert.sh <comma-separated list of SANs for the apiserver certificate>
bash -x gen_apiserver_cert.sh  127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,10.96.0.1,192.168.1.40,192.168.1.41,192.168.1.42,192.168.1.51,192.168.1.52,192.168.1.53
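
Before moving on, confirm that every address passed in above made it into the certificate's SAN list; a missing SAN is a common cause of TLS verification failures later:

openssl x509 -in ssl/kube-apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"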

Generate the configuration and systemd service files

Create the script apiserver_config.sh

## --service-cluster-ip-range must not overlap with the host network or the pod network
cat <<'EOF' | sudo tee apiserver_config.sh
#!/bin/bash

## Create the kube-apiserver systemd unit with its startup flags
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
ETCD_CERT_DIR=/opt/etcd/ssl
K8S_CERT_DIR=/opt/k8s/ssl
K8S_CONF_DIR=/opt/k8s/config
API_CERT_PREFIX=kube-apiserver

cat > service/kube-apiserver.service.${MASTER_ADDRESS} <<EOF1
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=${MASTER_ADDRESS} \\
--advertise-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--service-cluster-ip-range=10.96.0.0/12 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=${ETCD_SERVERS} \\
--etcd-cafile=${ETCD_CERT_DIR}/etcd-ca.pem \\
--etcd-certfile=${ETCD_CERT_DIR}/etcd.pem \\
--etcd-keyfile=${ETCD_CERT_DIR}/etcd-key.pem \\
--client-ca-file=${K8S_CERT_DIR}/ca.pem \\
--tls-cert-file=${K8S_CERT_DIR}/${API_CERT_PREFIX}.pem \\
--tls-private-key-file=${K8S_CERT_DIR}/${API_CERT_PREFIX}-key.pem \\
--kubelet-client-certificate=${K8S_CERT_DIR}/${API_CERT_PREFIX}.pem \\
--kubelet-client-key=${K8S_CERT_DIR}/${API_CERT_PREFIX}-key.pem \\
--service-account-key-file=${K8S_CERT_DIR}/sa.pub \\
--service-account-signing-key-file=${K8S_CERT_DIR}/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--proxy-client-cert-file=${K8S_CERT_DIR}/front-proxy-client.pem \\
--proxy-client-key-file=${K8S_CERT_DIR}/front-proxy-client-key.pem \\
--requestheader-client-ca-file=${K8S_CERT_DIR}/front-proxy-ca.pem \\
--requestheader-allowed-names=front-proxy-client \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User
##--token-auth-file=${K8S_CONF_DIR}/token.csv
    
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF1
EOF

Run it:

## bash apiserver_config.sh <master_IP> <etcd_cluster>
bash -x apiserver_config.sh 192.168.1.51 https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379

bash -x apiserver_config.sh 192.168.1.52 https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379

bash -x apiserver_config.sh 192.168.1.53 https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379

## Generated in the service directory:
├── kube-apiserver.service.192.168.1.51
├── kube-apiserver.service.192.168.1.52
├── kube-apiserver.service.192.168.1.53
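
Each generated unit should carry its own node address; any of the three files can be spot-checked the same way:

grep -E -- '--(bind|advertise)-address' service/kube-apiserver.service.192.168.1.51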

Distribute the binaries, certificates, and service files

for i in 192.168.1.51; do \
ssh $i "mkdir -p /opt/k8s/{ssl,config,log}"; \
scp bin/kube-apiserver  $i:/usr/local/bin/; \
scp ssl/{kube*.pem,ca{,-key}.pem,front-proxy-client*.pem,front-proxy-ca.pem,sa.*} $i:/opt/k8s/ssl/; \
scp service/kube-apiserver.service.$i $i:/usr/lib/systemd/system/kube-apiserver.service; \
done

Start the kube-apiserver service

for i in m01; do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-apiserver"; \
ssh $i "systemctl restart kube-apiserver --no-block"; \
ssh $i "systemctl is-active kube-apiserver"; \
done
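
Once the service reports active, the apiserver should answer its health endpoint even without credentials, since the default system:public-info-viewer RBAC binding allows unauthenticated access to /healthz. A minimal check, assuming m01 is 192.168.1.51:

curl -sk https://192.168.1.51:6443/healthz; echo
## expected output: ok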

Deploy kubectl

Generate the certificate required by kubectl

Create the script gen_kubectl_cert.sh

cat <<'EOF' | sudo tee gen_kubectl_cert.sh
#!/bin/bash
## Generate the kubectl certificate and private key
## The certificate identifies the user clusteradmin in the O=system:masters group. Kubernetes ships
## with the cluster-admin clusterrole (full control of the cluster) and a clusterrolebinding that
## binds it to the system:masters group, so every user in that group has full admin rights
cat > kubectl-csr.json <<EOF1
{
  "CN": "clusteradmin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF1

cfssl gencert -ca=ssl/ca.pem -ca-key=ssl/ca-key.pem -config=ca-config.json -profile=peer kubectl-csr.json | cfssljson -bare ssl/kubectl
EOF

Run it:

bash -x gen_kubectl_cert.sh
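
The identity inside the certificate is what carries the admin rights, so confirm the subject contains CN = clusteradmin and O = system:masters:

openssl x509 -in ssl/kubectl.pem -noout -subject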

Generate the kubeconfig file

Create the script kubeconfig_kubectl_config.sh

## Adjust the parameters below to match your deployment
## USERNAME must match the CN in the certificate signing request
cat <<'EOF' | sudo tee kubeconfig_kubectl_config.sh
#!/bin/bash
APISERVER_IP=$1
K8S_CERT_DIR=$2
PORT=8443
KUBE_APISERVER=https://${APISERVER_IP}:${PORT}
CLUSTER_NAME=kubernetes
USERNAME=clusteradmin
KUBECONFIG_FILE=kubeconfig/kubectl.kubeconfig
CONTEXT_NAME=${USERNAME}@${CLUSTER_NAME}
CERT_PREFIX=kubectl

## Set the cluster entry
./bin/kubectl config set-cluster ${CLUSTER_NAME} \
    --certificate-authority=${K8S_CERT_DIR}/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the client credentials
./bin/kubectl config set-credentials ${USERNAME} \
	--client-certificate=${K8S_CERT_DIR}/${CERT_PREFIX}.pem \
	--client-key=${K8S_CERT_DIR}/${CERT_PREFIX}-key.pem \
	--embed-certs=true \
	--kubeconfig=${KUBECONFIG_FILE}

## Set the context -- tie the user to the cluster
./bin/kubectl config set-context ${CONTEXT_NAME} \
    --cluster=${CLUSTER_NAME} \
    --user=${USERNAME} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the default context
./bin/kubectl config use-context ${CONTEXT_NAME} \
    --kubeconfig=${KUBECONFIG_FILE}
EOF

Run it:

bash -x kubeconfig_kubectl_config.sh 192.168.1.40 ssl

## Generated in the kubeconfig directory:
├── kubectl.kubeconfig
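
The generated file can be inspected before distribution; kubectl redacts the embedded certificate data:

./bin/kubectl config view --kubeconfig=kubeconfig/kubectl.kubeconfig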

Distribute the kubeconfig file

## Distribute the kubeconfig to all master nodes
for i in m01; do \
ssh $i "mkdir -p $HOME/.kube/"; \
scp bin/kubectl $i:/usr/local/bin/; \
scp kubeconfig/kubectl.kubeconfig $i:$HOME/.kube/config; \
done

Grant the clusteradmin user access to the kubelet API (used later when kubelets delegate authorization to the apiserver):

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user clusteradmin --kubeconfig=$HOME/.kube/config
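
Confirm the binding exists:

kubectl get clusterrolebinding kube-apiserver:kubelet-apis -o wide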

Check the cluster status

kubectl cluster-info
Kubernetes control plane is running at https://192.168.1.40:8443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


## scheduler and controller-manager report Unhealthy here because they have not been deployed yet
kubectl get componentstatus
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused   
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused   
etcd-2               Healthy     {"health":"true","reason":""}                                                                  
etcd-0               Healthy     {"health":"true","reason":""}                                                                  
etcd-1               Healthy     {"health":"true","reason":""}                                                                  

kubectl get all -A
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   23m

Deploy the kube-controller-manager component

Generate the certificates required by kube-controller-manager

Create the certificate generation script gen_controller_cert.sh

cat <<'EOF' | sudo tee gen_controller_cert.sh
#!/bin/bash

CONTROLLER_IP=$1

## Generate the controller-manager certificate signing request
cat > kube-controller-manager-csr.json <<EOF1
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF1

cfssl gencert -ca=ssl/ca.pem -ca-key=ssl/ca-key.pem \
-config=ca-config.json \
-hostname=${CONTROLLER_IP} \
-profile=peer kube-controller-manager-csr.json | cfssljson -bare ssl/kube-controller-manager
EOF

Run it:

bash -x gen_controller_cert.sh 127.0.0.1,192.168.1.51,192.168.1.52,192.168.1.53

## Generated in the ssl directory:
├── kube-controller-manager-key.pem
├── kube-controller-manager.pem
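
The CN must be exactly system:kube-controller-manager, because the built-in RBAC bindings grant the controller manager's permissions to that username; verify with:

openssl x509 -in ssl/kube-controller-manager.pem -noout -subject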

Generate the kubeconfig file

cat <<'EOF' | sudo tee kubeconfig_kube-controller-manager.sh
#!/bin/bash
APISERVER_IP=$1
K8S_CERT_DIR=$2
PORT=8443
KUBE_APISERVER=https://${APISERVER_IP}:${PORT}
KUBECONFIG_FILE=kubeconfig/kube-controller-manager.kubeconfig
CLUSTER_NAME=kubernetes
USERNAME=system:kube-controller-manager
CONTEXT_NAME=${USERNAME}@${CLUSTER_NAME}
CERT_PREFIX=kube-controller-manager

## Set the cluster entry
./bin/kubectl config set-cluster ${CLUSTER_NAME} \
    --certificate-authority=${K8S_CERT_DIR}/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the user credentials
./bin/kubectl config set-credentials ${USERNAME} \
	--client-certificate=${K8S_CERT_DIR}/${CERT_PREFIX}.pem \
	--client-key=${K8S_CERT_DIR}/${CERT_PREFIX}-key.pem \
	--embed-certs=true \
	--kubeconfig=${KUBECONFIG_FILE}

## Set the context -- tie the user to the cluster
./bin/kubectl config set-context ${CONTEXT_NAME} \
    --cluster=${CLUSTER_NAME} \
    --user=${USERNAME} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the default context
./bin/kubectl config use-context ${CONTEXT_NAME} \
    --kubeconfig=${KUBECONFIG_FILE}
EOF

Run it:

## Example: bash kubeconfig_kube-controller-manager.sh <MASTER_IPADDR> <cert_dir>
bash -x kubeconfig_kube-controller-manager.sh 192.168.1.40 ssl

## Generated in the kubeconfig directory:
├── kube-controller-manager.kubeconfig

Generate the kube-controller-manager service file

Create the script kube-controller-manager.sh

## --cluster-cidr is the pod network; it must not overlap with the host or service networks
cat <<'EOF' | sudo tee kube-controller-manager.sh
#!/bin/bash
K8S_CERT_DIR=/opt/k8s/ssl
K8S_CONF_DIR=/opt/k8s/config

cat > service/kube-controller-manager.service <<EOF1
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--v=2 \\
--bind-address=127.0.0.1 \\
--root-ca-file=${K8S_CERT_DIR}/ca.pem \\
--cluster-signing-cert-file=${K8S_CERT_DIR}/ca.pem \\
--cluster-signing-key-file=${K8S_CERT_DIR}/ca-key.pem \\
--service-account-private-key-file=${K8S_CERT_DIR}/sa.key \\
--tls-cert-file=${K8S_CERT_DIR}/kube-controller-manager.pem \\
--tls-private-key-file=${K8S_CERT_DIR}/kube-controller-manager-key.pem \\
--kubeconfig=${K8S_CONF_DIR}/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=2m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--cluster-cidr=172.16.0.0/16 \\
--requestheader-client-ca-file=${K8S_CERT_DIR}/front-proxy-ca.pem \\
--node-cidr-mask-size=24
      
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF1
EOF

Run it:

bash kube-controller-manager.sh

## Generated in the service directory:
├── kube-controller-manager.service

Distribute the binaries, certificates, kubeconfig, and service files

for i in m01; do \
ssh $i "mkdir -p /opt/k8s/{ssl,config}"; \
scp bin/kube-controller-manager  $i:/usr/local/bin/; \
scp ssl/kube-controller*.pem $i:/opt/k8s/ssl/; \
scp service/kube-controller-manager.service $i:/usr/lib/systemd/system/; \
scp kubeconfig/kube-controller-manager.kubeconfig $i:/opt/k8s/config/; \
done

Start the kube-controller-manager service

for i in m01; do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-controller-manager"; \
ssh $i "systemctl restart kube-controller-manager --no-block"; \
ssh $i "systemctl is-active kube-controller-manager"; \
done

Verify

ss -tlp | grep kube-controller
LISTEN 0      16384      127.0.0.1:10257             0.0.0.0:*    users:(("kube-controller",pid=14666,fd=7))

curl -s --cacert /opt/k8s/ssl/ca.pem https://127.0.0.1:10257/metrics | head
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/metrics\"",
  "reason": "Forbidden",
  "details": {},
  "code": 403
}
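
The 403 above is expected: TLS and the listener work, but the anonymous user may not read /metrics. /healthz, by contrast, is covered by the controller manager's default --authorization-always-allow-paths, so it answers without credentials:

curl -s --cacert /opt/k8s/ssl/ca.pem https://127.0.0.1:10257/healthz; echo
## expected output: ok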

Deploy the kube-scheduler component

Generate the certificates required by kube-scheduler

Create the script gen_scheduler_cert.sh

cat <<'EOF' | sudo tee gen_scheduler_cert.sh
#!/bin/bash
## Generate the kube-scheduler certificate and private key
SCHEDULER_IP=$1
CSR_NAME_PREFIX=kube-scheduler

cat > ${CSR_NAME_PREFIX}-csr.json <<EOF1
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF1

cfssl gencert -ca=ssl/ca.pem -ca-key=ssl/ca-key.pem \
-config=ca-config.json \
-hostname=${SCHEDULER_IP} \
-profile=peer ${CSR_NAME_PREFIX}-csr.json | cfssljson -bare ssl/${CSR_NAME_PREFIX}
EOF

Run it:

bash -x gen_scheduler_cert.sh 127.0.0.1,192.168.1.51,192.168.1.52,192.168.1.53
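
As with the controller manager, the built-in RBAC bindings key off the username, so the CN must be exactly system:kube-scheduler:

openssl x509 -in ssl/kube-scheduler.pem -noout -subject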

Generate the kubeconfig file

Create the script kubeconfig_kube-scheduler.sh

cat <<'EOF' | sudo tee kubeconfig_kube-scheduler.sh 
#!/bin/bash

APISERVER_IP=$1
K8S_CERT_DIR=$2
PORT=8443
KUBE_APISERVER=https://${APISERVER_IP}:${PORT}
KUBECONFIG_FILE=kubeconfig/kube-scheduler.kubeconfig
CLUSTER_NAME=kubernetes
USERNAME=system:kube-scheduler
CONTEXT_NAME=${USERNAME}@${CLUSTER_NAME}
CERT_PREFIX=kube-scheduler

## Set the cluster entry
./bin/kubectl config set-cluster ${CLUSTER_NAME} \
    --certificate-authority=${K8S_CERT_DIR}/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the user credentials
./bin/kubectl config set-credentials ${USERNAME} \
	--client-certificate=${K8S_CERT_DIR}/${CERT_PREFIX}.pem \
	--client-key=${K8S_CERT_DIR}/${CERT_PREFIX}-key.pem \
	--embed-certs=true \
	--kubeconfig=${KUBECONFIG_FILE}

## Set the context -- tie the user to the cluster
./bin/kubectl config set-context ${CONTEXT_NAME} \
    --cluster=${CLUSTER_NAME} \
    --user=${USERNAME} \
    --kubeconfig=${KUBECONFIG_FILE}

## Set the default context
./bin/kubectl config use-context ${CONTEXT_NAME} \
    --kubeconfig=${KUBECONFIG_FILE}
EOF

Run it:

bash -x kubeconfig_kube-scheduler.sh 192.168.1.40 ssl

## Generated in the kubeconfig directory:
├── kube-scheduler.kubeconfig

Generate the kube-scheduler service file

Create the script kube-scheduler.sh

cat <<'EOF' | sudo tee kube-scheduler.sh
#!/bin/bash

K8S_CONF_DIR=/opt/k8s/config

cat > service/kube-scheduler.service <<EOF1
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--v=2 \\
--bind-address=127.0.0.1 \\
--leader-elect=true \\
--kubeconfig=${K8S_CONF_DIR}/kube-scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF1
EOF

Run it:

bash -x kube-scheduler.sh

## Generated in the service directory:
├── kube-scheduler.service

Distribute the binaries, certificates, kubeconfig, and service files

for i in m01; do \
ssh $i "mkdir -p /opt/k8s/{ssl,config}"; \
scp bin/kube-scheduler $i:/usr/local/bin/; \
scp ssl/kube-scheduler*.pem $i:/opt/k8s/ssl/; \
scp service/kube-scheduler.service $i:/usr/lib/systemd/system/; \
scp kubeconfig/kube-scheduler.kubeconfig $i:/opt/k8s/config/; \
done

Start the kube-scheduler service

for i in m01; do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-scheduler"; \
ssh $i "systemctl restart kube-scheduler --no-block"; \
ssh $i "systemctl is-active kube-scheduler"; \
done
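
The same verification applies to the scheduler, which listens on 10259; its /healthz is likewise covered by the default --authorization-always-allow-paths. Note -k here: no serving certificate was configured, so kube-scheduler generates a self-signed one.

ss -tlp | grep kube-scheduler
curl -sk https://127.0.0.1:10259/healthz; echo
## expected output: ok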

Verify the cluster

kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok                              
scheduler            Healthy   ok                              
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}