I. Installing Kubernetes with kubeadm

Kubernetes versions change quickly, so this article pins a specific release: v1.21.3 on CentOS 7.6. kubeadm, kubelet, and kubectl are all installed via yum, and flannel is used as the network plugin.

II. Environment Information

IP            Hostname    Spec   Role     Notes
172.16.4.169  k8s-master  4c8g   master
172.16.4.121  k8s-node1   4c8g   work1
172.16.4.203  k8s-node2   4c8g   work2

III. Basic OS Configuration

1. Configure hosts resolution

cat >> /etc/hosts <<EOF 
172.16.4.169  k8s-master
172.16.4.121  k8s-node1
172.16.4.203  k8s-node2 
EOF

2. Generate an SSH key and distribute it to the other hosts for passwordless login

# generate the ssh key pair; just press Enter at every prompt
ssh-keygen -t rsa
cat /root/.ssh/id_rsa.pub 
# copy the public key into each node's trusted list; enter each host's password once
ssh-copy-id root@k8s-node1
ssh-copy-id root@k8s-node2

3. Disable swap

Swap uses disk blocks as extra memory only when RAM runs out, and disk I/O is far slower than memory, so disable it for performance (kubeadm's preflight checks also complain when swap is enabled). Run on every node:

swapoff -a 
cp /etc/fstab  /etc/fstab.bak
cat /etc/fstab.bak | grep -v swap > /etc/fstab
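
To confirm swap is fully disabled, both checks below should come back empty / 0B:

# verify: swapon prints nothing and free shows 0B of swap
swapon -s
free -h | grep -i swap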

4. Disable SELinux

Disable SELinux; otherwise kubelet may report Permission denied when mounting directories. It can be set to permissive or disabled; permissive still logs warnings. Run on every node:

setenforce 0 
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
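
Note that setenforce 0 only lasts until the next reboot, while the sed edit is permanent. A quick check:

# should print Permissive now, and SELINUX=disabled in the config file
getenforce
grep '^SELINUX=' /etc/selinux/config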

5. Set the timezone and synchronize time

timedatectl set-timezone Asia/Shanghai 
systemctl enable --now chronyd
# check sync status
timedatectl status
# keep the hardware clock in UTC
timedatectl set-local-rtc 0 
# restart services that depend on the system time
systemctl restart rsyslog && systemctl restart crond
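
To confirm chrony is actually synchronizing, inspect its sources; a line starting with ^* marks the currently selected server:

# verify time synchronization
chronyc sources -v
chronyc tracking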

6. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

7. Tune kernel parameters

cp /etc/sysctl.conf{,.bak}
# append the settings to /etc/sysctl.conf
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf
echo "vm.swappiness = 0" >> /etc/sysctl.conf

# load br_netfilter and apply the settings
modprobe br_netfilter
sysctl -p
# expected output:
[root@Copy-of-EE-CentOS76-v1 ~]# sysctl -p
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.forwarding = 1
vm.swappiness = 0

8. Enable IPVS support

# create the module-loading script
vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done

chmod 755 /etc/sysconfig/modules/ipvs.modules 
sh /etc/sysconfig/modules/ipvs.modules 
lsmod | grep ip_vs

# expected output:
[root@Copy-of-EE-CentOS76-v1 ~]# lsmod |grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145497  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

9. Upgrade the kernel

1) Check the current kernel
[root@Copy-of-EE-CentOS76-v1 ~]# uname -r
3.10.0-1127.el7.x86_64
2) Install the elrepo repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
3) List available kernels
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
4) Install the long-term-support kernel
yum --enablerepo=elrepo-kernel install kernel-lt-devel kernel-lt -y
5) List the kernels GRUB can boot
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
6) Set the new kernel as the GRUB2 default (entry 0 is the newest)
grub2-set-default 0
7) Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
8) Reboot
reboot
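
After the reboot, confirm the node is running the new kernel (kernel-lt was in the 5.4.x series at the time of writing; your exact version may differ):

# should now show the elrepo long-term kernel instead of 3.10.0
uname -r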

10. Make bridged traffic visible to iptables

cat > /etc/sysctl.d/k8s.conf <<EOF 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
 
sysctl --system
# verify: both should return 1
sysctl -n net.bridge.bridge-nf-call-iptables 
sysctl -n net.bridge.bridge-nf-call-ip6tables

IV. Install and Configure Docker

1. Add the Docker yum repository

# install prerequisite packages
yum install -y yum-utils device-mapper-persistent-data lvm2 
# add the aliyun docker-ce yum repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
# rebuild the yum cache
yum makecache fast

2. Install Docker

# list the available docker versions
yum list docker-ce.x86_64 --showduplicates | sort -r
# install the latest version
yum install docker-ce -y
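
If you want reproducible installs, pin an explicit version from the list instead of taking the latest. The version string below is only an example; copy the exact one from the yum list output:

# optional: install a pinned version (example string, adjust to your list output)
version=19.03.15-3.el7
yum install -y docker-ce-${version} docker-ce-cli-${version} containerd.io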

3. Ensure the required kernel modules load at boot

lsmod | grep overlay 
lsmod | grep br_netfilter
# if either command above returns nothing or reports a missing file, run:
cat > /etc/modules-load.d/docker.conf <<EOF 
overlay 
br_netfilter 
EOF

# then load them immediately:
modprobe overlay 
modprobe br_netfilter

4. Configure Docker

Set the cgroup driver to systemd (recommended by Kubernetes), cap container log size, set the overlay2 storage driver, and optionally relocate Docker's data directory:

## daemon.json: registry mirror plus the settings above
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://gp8745ui.mirror.aliyuncs.com"],
  "data-root": "/data/docker"
}
EOF

# edit the ExecStart line (line 13) of the service unit
vim /lib/systemd/system/docker.service
 
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --default-ulimit core=0:0

# enable the service at boot and start it
systemctl daemon-reload
systemctl enable --now docker

# verify docker is healthy
docker info
# smoke-test docker: run a named container, then remove it
docker run --name nginx-test -d nginx
docker rm -f nginx-test

V. Deploy the Kubernetes Cluster

1. Add the Kubernetes yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# rebuild the yum cache
yum makecache fast

2. Install kubeadm, kubelet, and kubectl

  • kubeadm and kubelet are required on every node
  • kubectl is only needed on the master, not on the workers
## list the available kubelet versions
yum list kubelet --showduplicates | sort -r
## pin and install a specific version via the version variable
version=1.21.3-0
yum install -y kubelet-${version} kubeadm-${version} kubectl-${version}
systemctl enable kubelet

3. Configure command auto-completion

# install the bash-completion package
yum install bash-completion -y
# enable kubectl and kubeadm completion; takes effect at next login
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
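
Optionally, alias kubectl to k and wire completion to the alias as well (this mirrors the upstream kubectl completion docs):

# optional: short alias with completion, effective at next login
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc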

4. List the images required by the chosen version

kubeadm config images list --kubernetes-version v1.21.3

[root@copy-of-ee-centos76-v1 yum.repos.d]# kubeadm config images list --kubernetes-version v1.21.3
k8s.gcr.io/kube-apiserver:v1.21.3
k8s.gcr.io/kube-controller-manager:v1.21.3
k8s.gcr.io/kube-scheduler:v1.21.3
k8s.gcr.io/kube-proxy:v1.21.3
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0

5. Pull the images with a custom script

# create the image-pull script
vim pullimages.sh
#!/bin/bash
# Pull the required images from an Aliyun mirror, then re-tag them as k8s.gcr.io

ver=v1.21.3
registry=registry.cn-hangzhou.aliyuncs.com/google_containers
images=`kubeadm config images list --kubernetes-version=$ver | awk -F '/' '{print $2}'`

for image in $images
do
  if [ $image != coredns ]; then
    docker pull ${registry}/$image
    if [ $? -eq 0 ]; then
      docker tag ${registry}/$image k8s.gcr.io/$image
      docker rmi ${registry}/$image
    else
      echo "ERROR: failed to pull image $image"
    fi
  else
    # coredns lives under a nested path on k8s.gcr.io, so handle it separately
    docker pull coredns/coredns:1.8.0
    docker tag coredns/coredns:1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0
    docker rmi coredns/coredns:1.8.0
  fi
done

## make the script executable and run it
chmod +x pullimages.sh && ./pullimages.sh

## check the resulting images
docker images
[root@copy-of-ee-centos76-v1 k8s]# docker images
REPOSITORY                           TAG        IMAGE ID       CREATED         SIZE
nginx                                latest     605c77e624dd   2 months ago    141MB
hello-world                          latest     feb5d9fea6a5   6 months ago    13.3kB
k8s.gcr.io/kube-apiserver            v1.21.3    3d174f00aa39   8 months ago    126MB
k8s.gcr.io/kube-scheduler            v1.21.3    6be0dc1302e3   8 months ago    50.6MB
k8s.gcr.io/kube-proxy                v1.21.3    adb2816ea823   8 months ago    103MB
k8s.gcr.io/kube-controller-manager   v1.21.3    bc2bb319a703   8 months ago    120MB
k8s.gcr.io/pause                     3.4.1      0f8457a4c2ec   14 months ago   683kB
k8s.gcr.io/coredns/coredns           v1.8.0     296a6d5035e2   17 months ago   42.5MB
k8s.gcr.io/etcd                      3.4.13-0   0369cf4303ff   19 months ago   253MB

### export the images and copy them to the other nodes
docker save $(docker images | grep -v REPOSITORY | awk 'BEGIN{OFS=":";ORS=" "}{print $1,$2}') -o k8s-images.tar

# copy to a worker node
scp k8s-images.tar root@172.16.4.121:/k8s/

# import on each worker node
docker load -i k8s-images.tar
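
With more workers, the copy-and-load steps roll into one loop; a sketch assuming the /k8s directory exists on each worker and the passwordless SSH set up earlier:

# push the archive to every worker and load it there
for node in k8s-node1 k8s-node2; do
  scp k8s-images.tar root@${node}:/k8s/
  ssh root@${node} "docker load -i /k8s/k8s-images.tar"
done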

6. Set kubelet's default cgroup driver

mkdir -p /var/lib/kubelet
 
cat > /var/lib/kubelet/config.yaml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF
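
To confirm Docker and kubelet agree on the driver, Docker should already report systemd after the daemon.json change above:

# should print: Cgroup Driver: systemd
docker info | grep -i 'cgroup driver'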

7. Initialize the master node

Run the following steps on the master node only:

7.1 Generate the kubeadm init configuration file

kubeadm config print init-defaults > kubeadm-config.yaml

# change 1: set the advertise address and node name
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
# becomes:
localAPIEndpoint:
  advertiseAddress: 172.16.4.169
# note: the node name belongs under nodeRegistration, not localAPIEndpoint
# (placing it under localAPIEndpoint triggers the "unknown field" warning in the init log below):
nodeRegistration:
  name: k8s-master

# change 2: set the exact version and add the pod subnet
kubernetesVersion: 1.21.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
# becomes:
kubernetesVersion: 1.21.3
networking:
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
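
For reference, the relevant parts of the edited kubeadm-config.yaml end up looking roughly like this (a minimal sketch; the file generated by init-defaults contains more fields, which can stay at their defaults):

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.4.169
  bindPort: 6443
nodeRegistration:
  name: k8s-master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: 1.21.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12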

7.2 Run the preflight checks

kubeadm init phase preflight

7.3 Initialize the master

kubeadm init --config=kubeadm-config.yaml --ignore-preflight-errors=2 --upload-certs | tee kubeadm-init.log

The output looks like this:
[root@copy-of-ee-centos76-v1 k8s]# kubeadm init --config=kubeadm-config.yaml --ignore-preflight-errors=2 --upload-certs | tee kubeadm-init.log
W0325 11:28:34.336378   17338 strict.go:54] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta2", Kind:"InitConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "name"
[init] Using Kubernetes version: v1.21.3
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "node" could not be reached
	[WARNING Hostname]: hostname "node": lookup node on 172.16.4.1:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node] and IPs [10.96.0.1 172.16.4.169]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node] and IPs [172.16.4.169 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node] and IPs [172.16.4.169 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.502016 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
2c706bca1cd4f95dfa7afdffd2bec150c712ad728f8549d263fd90ec7a4fbfd6
[mark-control-plane] Marking the node node as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node node as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.4.169:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:beda3abf8009059a56774c0fadf481025016048d6b36aa39e59412056baf89dc 

7.4 Grant kubectl access to a regular cluster user

su - tidb
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/admin.conf
sudo chown $(id -u):$(id -g) $HOME/.kube/admin.conf
echo "export KUBECONFIG=$HOME/.kube/admin.conf" >> ~/.bashrc
exit

7.5 Configure kubectl authentication for root on the master

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/profile 
source  /etc/profile

7.6 Install the flannel network add-on

At this point the master has initialized successfully, but without a network add-on it cannot communicate with the other nodes. Install one; flannel is the common choice.

curl -o kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml    # the image pull here is very slow; pull it manually first and retry a few times if needed
docker pull quay.io/coreos/flannel:v0.14.0
kubectl apply -f kube-flannel.yml
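
Before checking node status, confirm the flannel pods reach Running (depending on the manifest version they land in kube-system or kube-flannel):

# flannel runs as a DaemonSet, one pod per node
kubectl get pods -A -o wide | grep flannel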

7.7 Check node status

[root@copy-of-ee-centos76-v1 k8s]# kubectl get nodes 
NAME   STATUS   ROLES                  AGE   VERSION
node   Ready    control-plane,master   17m   v1.21.3

# If STATUS shows NotReady, run kubectl describe node <master-node> for details; low-spec servers take longer to reach Ready.

7.8 Print the cluster join command

kubeadm token create --print-join-command

[root@copy-of-ee-centos76-v1 k8s]# kubeadm token create --print-join-command
kubeadm join 172.16.4.169:6443 --token pkng7m.vrjxxwedxf183w0b --discovery-token-ca-cert-hash sha256:beda3abf8009059a56774c0fadf481025016048d6b36aa39e59412056baf89dc 

8. Join the worker nodes to the cluster

Run the join command on each worker node:

kubeadm join 172.16.4.169:6443 --token pkng7m.vrjxxwedxf183w0b --discovery-token-ca-cert-hash sha256:beda3abf8009059a56774c0fadf481025016048d6b36aa39e59412056baf89dc 


Expected output:
[root@copy-of-ee-centos76-v1 k8s]# kubeadm join 172.16.4.169:6443 --token pkng7m.vrjxxwedxf183w0b --discovery-token-ca-cert-hash sha256:beda3abf8009059a56774c0fadf481025016048d6b36aa39e59412056baf89dc 
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

9. Verify the joined nodes from the master

[root@copy-of-ee-centos76-v1 k8s]# kubectl get nodes 
NAME                        STATUS   ROLES                  AGE     VERSION
copy-of-ee-centos76-v1.05   Ready    <none>                 3m16s   v1.21.3
node                        Ready    control-plane,master   58m     v1.21.3
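
As a quick smoke test of scheduling and cluster networking, a sketch: deploy nginx, expose it as a NodePort, and fetch the default page (replace <NodePort> with the port shown by kubectl get svc):

# deploy, expose, test, clean up
kubectl create deployment nginx-test --image=nginx
kubectl expose deployment nginx-test --port=80 --type=NodePort
kubectl get svc nginx-test
curl -s http://172.16.4.121:<NodePort> | head -n 5
kubectl delete svc,deployment nginx-test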

VI. Deploy the Dashboard

6.1 Download the dashboard manifest

curl -o recommended.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

Change the Service to type NodePort so the dashboard is reachable from outside the cluster:

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
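
Alternatively, apply the stock manifest unchanged and patch the Service afterwards; a one-liner sketch (strategic merge on the ports list, keyed by port):

# switch the dashboard Service to NodePort without editing the manifest
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30001}]}}'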

6.2 Pull the images and deploy

docker pull kubernetesui/dashboard:v2.3.1
docker pull kubernetesui/metrics-scraper:v1.0.6
 
kubectl apply -f recommended.yaml
# check the pods and services
[root@k8s-master k8s]# kubectl get pods,svc -n kubernetes-dashboard
NAME                                             READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-856586f554-rk6r5   1/1     Running   0          128m
pod/kubernetes-dashboard-67484c44f6-9h7zk        1/1     Running   0          128m

NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   10.110.82.22    <none>        8000/TCP        128m
service/kubernetes-dashboard        NodePort    10.108.52.134   <none>        443:30001/TCP   128m

6.3 Access the dashboard in a browser at https://NodeIP:30001

6.4 Create an admin account and retrieve its login token

kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')

## example output:
[root@k8s-master k8s]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name:         dashboard-admin-token-84x84
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 505cb890-3b40-47c0-b4c5-125d72996c64

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IlYxc2pxT1hRQkdZNGFaLUtPOWpEYVZLM1FIeFJPVzFvOXA2aGp6RS0xSjQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tODR4ODQiLCJrdWJlcm5ld
ca.crt:     1066 bytes
namespace:  11 bytes
You can now log in to the dashboard with this token.