Table of Contents

  • I. Deploying the registry and the k8s environment
  • 1. Install docker (docker-ce repo) on server1,2,3,4
  • 2. Harbor registry
  • 3. server2 as k8s master, server3/4 as nodes
  • II. k8s scheduling
  • 1. nodeName is the simplest form of node selection constraint, but it is generally not recommended; if nodeName is set in the PodSpec, it takes precedence over other node-selection methods
  • 2. nodeSelector is the simplest recommended form of node selection constraint
  • 3. Node affinity and anti-affinity
  • 1) Node affinity: hard requirements must be met, soft preferences may go unmet
  • 2) Pod affinity: pods scheduled onto the same node
  • 3) Pod anti-affinity: pods kept on different nodes
  • 4. Tolerations
  • 1) cordon: stop scheduling
  • 2) drain: evict pods to other nodes
  • 3) delete: remove the node outright; restart kubelet to re-register it
  • III. Kubernetes access control
  • 1. Kubernetes API access control
  • 1) Create a ServiceAccount
  • 2) Create a UserAccount
  • 3) RBAC (Role Based Access Control): role-based authorization



I. Deploying the registry and the k8s environment

#create 4 VMs demo1,2,3,4; 1 hosts the registry, 2 is the master, 3 and 4 are nodes
#on the host, loop-mount the RHEL 8.2 iso under /var/www/html (fstab entry):

/isos/rhel-8.2-x86_64-dvd.iso /var/www/html/westos/ iso9660 loop,ro 0 0

1. Install docker (docker-ce repo) on server1,2,3,4

%%%server1: the registry
##install docker-ce
#download the docker-ce repo directory and place it under /var/www/html on the host
[root@server1~]# vim /etc/yum.repos.d/docker.repo
[root@server1~]# cat /etc/yum.repos.d/docker.repo 
[docker]
name=docker-ce
baseurl=http://172.25.254.3/docker-ce
gpgcheck=0

[root@server1~]# yum install docker-ce -y
[root@server1~]# systemctl start docker
[root@server1~]# systemctl enable docker
[root@server1~]# docker info    #prints two warnings (bridge-nf-call-iptables / ip6tables disabled)
[root@server1~]# vim /etc/sysctl.d/docker.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
[root@server1~]# sysctl --system
[root@server1~]# docker info    #warnings gone

##registry mirror (accelerator)
[root@server1 ~]# ls
99-sysctl.conf  docker  rhel7.tar
[root@server1 ~]# cd /etc/docker/
[root@server1 docker]# ls
key.json
[root@server1 docker]# vim daemon.json
[root@server1 docker]# cat daemon.json 
{
  "registry-mirrors": ["https://qxj9x6qf.mirror.aliyuncs.com"]
}
[root@server1 docker]# systemctl daemon-reload
[root@server1 docker]# systemctl reload docker
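
To confirm the mirror is in effect, docker info now lists it (a quick check, not part of the original transcript):

docker info | grep -A1 "Registry Mirrors"
#expected output:
# Registry Mirrors:
#  https://qxj9x6qf.mirror.aliyuncs.com/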



##TLS encryption and authentication
[root@server1 ~]# mkdir -p certs
[root@server1 ~]# openssl req -newkey rsa:4096 -nodes -sha256 -keyout certs/westos.org.key -x509 -days 365 -out certs/westos.org.crt
Country Name (2 letter code) [XX]:cn
State or Province Name (full name) []:shanxi
Locality Name (eg, city) [Default City]:xian
Organization Name (eg, company) [Default Company Ltd]:westos
Organizational Unit Name (eg, section) []:linux
Common Name (eg, your name or your server's hostname) []:reg.westos.org
Email Address []:root@westos.org
[root@server1 ~]# vim /etc/hosts
172.25.3.1 server1 reg.westos.org
[root@server1 ~]# docker pull registry
[root@server1 ~]# docker run -d --name registry -p 443:443 -v /opt/registry:/var/lib/registry -v "$(pwd)"/certs:/certs -e REGISTRY_HTTP_ADDR=0.0.0.0:443 -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/westos.org.crt -e REGISTRY_HTTP_TLS_KEY=/certs/westos.org.key registry
ae23ee8761a9796e1aed3bb81b66f7b1e5b93447ed346167339bdad6f916a751
[root@server1 ~]# docker ps
CONTAINER ID   IMAGE      COMMAND                  CREATED          STATUS          PORTS                            NAMES
ae23ee8761a9   registry   "/entrypoint.sh /etc…"   13 seconds ago   Up 11 seconds   0.0.0.0:443->443/tcp, 5000/tcp   registry
[root@server1 ~]# docker images

[root@server1 ~]# docker tag yakexi007/game2048:latest reg.westos.org/game2048:latest
[root@server1 ~]# docker push reg.westos.org/game2048:latest 
[root@server1 ~]# mkdir /etc/docker/certs.d/reg.westos.org/ -p
[root@server1 ~]# cp certs/westos.org.crt /etc/docker/certs.d/reg.westos.org/ca.crt
[root@server1 ~]# ll /etc/docker/certs.d/reg.westos.org/ca.crt
-rw-r--r-- 1 root root 2102 Jan 24 19:11 /etc/docker/certs.d/reg.westos.org/ca.crt
[root@server1 ~]# docker push reg.westos.org/game2048:latest
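
A quick end-to-end check of the TLS registry is the Registry HTTP API v2 catalog endpoint (a sanity check, not from the original run):

curl --cacert certs/westos.org.crt https://reg.westos.org/v2/_catalog
#expected: {"repositories":["game2048"]}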
[root@server1 ~]# docker rm -f registry 


%%%on server2, 3 and 4
##install docker-ce the same way as on server1, through the docker info check
[root@server2 docker]# pwd
/etc/docker
[root@server2 docker]# rm -fr daemon.json 
[root@server2 docker]# systemctl reload docker
[root@server2 docker]# vim /etc/hosts
172.25.3.1 server1 reg.westos.org
[root@server2 docker]# mkdir /etc/docker/certs.d/reg.westos.org/ -p
[root@server1 ~]# scp /etc/docker/certs.d/reg.westos.org/ca.crt server2:/etc/docker/certs.d/reg.westos.org/
[root@server2 ~]# docker pull reg.westos.org/game2048

2. Harbor registry

[root@server1 ~]# ls
docker-compose-Linux-x86_64-1.24.1  harbor-offline-installer-v1.10.1.tgz docker-compose-Linux-x86_64-1.27.0
[root@server1 ~]# tar zxf harbor-offline-installer-v1.10.1.tgz
[root@server1 ~]# cd harbor/
[root@server1 harbor]# ls

[root@server1 ~]# mv docker-compose-Linux-x86_64-1.27.0 /usr/local/bin/docker-compose
[root@server1 ~]# chmod +x /usr/local/bin/docker-compose
[root@server1 ~]# cp -r certs/ /
[root@server1 ~]# cd /certs/
[root@server1 certs]# ls
westos.org.crt  westos.org.key

[root@server1 ~]# cd harbor/
[root@server1 harbor]# vim harbor.yml
hostname: reg.westos.org
certificate: /certs/westos.org.crt
private_key: /certs/westos.org.key
harbor_admin_password: westos
[root@server1 harbor]# ./install.sh
#web UI: https://192.168.100.241/ , user admin, password westos, then log in
##test: images can be pushed
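
Unlike the plain registry container earlier, Harbor requires authentication before a push; log in once with the admin account configured in harbor.yml:

docker login reg.westos.org -u admin -p westos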
[root@server1 ~]# cd /etc/docker
[root@server1 docker]# docker tag reg.westos.org/game2048:latest reg.westos.org/library/game2048:latest
[root@server1 docker]# docker push reg.westos.org/library/game2048:latest

%%%on server2, 3 and 4
[root@server2 ~]# cd /etc/docker/
[root@server2 docker]# ls
[root@server2 docker]# vim daemon.json
{
  "registry-mirrors": ["https://reg.westos.org"]
}
[root@server2 docker]# systemctl reload docker
[root@server2 docker]# docker pull reg.westos.org/library/game2048:latest    #pull succeeds

3. server2 as k8s master, server3/4 as nodes

##configuration files: daemon.json and k8s.repo
[root@server2 ~]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
[root@server2 ~]# vim /etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
##disable swap
[root@server2 ~]# vim /etc/fstab
#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
[root@server2 ~]# swapoff -a

[root@server2 ~]# yum install -y kubelet-1.20.2 kubeadm-1.20.2 kubectl-1.20.2
[root@server2 ~]# systemctl enable --now kubelet
[root@server2 ~]# systemctl daemon-reload

##on server1, push the required k8s images to the harbor registry (sketch below)
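That upload is not shown in the transcript; a minimal sketch, assuming a public project named k8s was created in the Harbor UI and the images come from the aliyun mirror (kube-apiserver shown, the others follow the same pattern):

#on server2, where kubeadm is installed, list the images this version needs
kubeadm config images list --kubernetes-version v1.20.2
#on server1: pull, retag into the k8s project, push
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.2
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.2 reg.westos.org/k8s/kube-apiserver:v1.20.2
docker push reg.westos.org/k8s/kube-apiserver:v1.20.2
#repeat for kube-controller-manager, kube-scheduler, kube-proxy, pause, etcd, coredns and the flannel image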
#initialize the cluster
[root@server2 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository reg.westos.org/k8s --kubernetes-version v1.20.2
[root@server2 ~]# mkdir -p $HOME/.kube
[root@server2 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@server2 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@server2 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc    #command completion; takes effect after re-login
##download the kube-flannel.yml file
[root@server2 ~]# vim kube-flannel.yml    #point both amd64 image lines at the local registry; change the backend Type from vxlan to host-gw
        image: reg.westos.org/k8s/flannel:v0.12.0-amd64
        image: reg.westos.org/k8s/flannel:v0.12.0-amd64
        "Type": "host-gw"
[root@server2 ~]# kubectl apply -f kube-flannel.yml    #install the network add-on
[root@server2 sysctl.d]# kubectl get pod --namespace kube-system    #all components start up; success
NAME                              READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-pdw7l          0/1     Running   0          10m
coredns-7f89b7bc75-r28lt          0/1     Running   0          10m
etcd-server2                      1/1     Running   0          11m
kube-apiserver-server2            1/1     Running   0          11m
kube-controller-manager-server2   1/1     Running   1          11m
kube-flannel-ds-amd64-hf8hg       1/1     Running   0          3h27m
kube-flannel-ds-amd64-lh6r6       1/1     Running   0          3h27m
kube-proxy-4vnhg                  1/1     Running   0          10m
kube-scheduler-server2            1/1     Running   0          11m
[root@server2 ~]# docker save quay.io/coreos/flannel:v0.12.0-amd64 registry.aliyuncs.com/google_containers/pause:3.2 registry.aliyuncs.com/google_containers/coredns:1.7.0 registry.aliyuncs.com/google_containers/kube-proxy:v1.20.2 >  node.tar
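
The tarball still has to reach each node before it can be loaded (the copy step is omitted in the original):

scp node.tar server3:
scp node.tar server4: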

%%%server3,4
##configure daemon.json and k8s.repo, disable swap, and install kubelet-1.20.2 / kubeadm-1.20.2 / kubectl-1.20.2 exactly as on server2 above, then systemctl enable --now kubelet

#on each node, load the images saved from server2, then join the cluster
[root@server3 ~]# docker load -i node.tar
[root@server3 ~]# kubeadm join 172.25.0.2:6443 --token a55bse.7wbzxx70srbrr7ko  --discovery-token-ca-cert-hash sha256:03b69cf689dc4cedfd52cd63167d06fdef69aa76d271b19428dd39fa254a0319
[root@server2 ~]# kubectl get node    #server2 and server3 are listed
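
The bootstrap token from kubeadm init expires after 24 hours; to join a node later, regenerate the full join command on the master:

kubeadm token create --print-join-command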

II. k8s scheduling

%%scheduling docs: https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/

1. nodeName is the simplest form of node selection constraint, but it is generally not recommended; if nodeName is set in the PodSpec, it takes precedence over other node-selection methods

[root@server2 ~]# vim pod.yml
[root@server2 ~]# cat pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
  nodeName: server3

[root@server2 ~]# kubectl apply -f pod.yml
[root@server2 ~]# kubectl get pod -o wide    #scheduled on server3
NAME    READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          23s   10.244.1.3   server3   <none>           <none>


[root@server2 ~]# kubectl delete pod nginx
[root@server2 ~]# vim pod.yml    #with nodeName: server5, which does not exist, the pod cannot be scheduled and stays Pending
[root@server2 ~]# kubectl apply -f pod.yml

2. nodeSelector is the simplest recommended form of node selection constraint

[root@server2 ~]# vim pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
[root@server2 ~]# kubectl label nodes server3 disktype=ssd
node/server3 labeled
[root@server2 ~]# kubectl get node --show-labels
[root@server2 ~]# kubectl get node -L disktype
NAME      STATUS   ROLES                  AGE    VERSION   DISKTYPE
server2   Ready    control-plane,master   138m   v1.20.2   
server3   Ready    <none>                 81m    v1.20.2   ssd
server4   Ready    <none>                 81m    v1.20.2  

[root@server2 ~]# kubectl apply -f pod.yml 
[root@server2 ~]# kubectl get pod -o wide
NAME    READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          33s   10.244.1.4   server3   <none>           <none>
[root@server2 ~]# kubectl delete -f pod.yml
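
To undo the node label later, use the trailing-dash form:

kubectl label nodes server3 disktype-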


3. Node affinity and anti-affinity

nodeSelector is a very simple way to constrain pods to nodes with particular labels. Affinity/anti-affinity greatly expands the kinds of constraints you can express:
rules can be "soft"/"preferred" rather than hard requirements, so the pod is still scheduled even when the scheduler cannot satisfy them;
rules can match the labels of pods already running on a node, rather than the node's own labels, to control which pods may or may not be co-located.

1) Node affinity: hard requirements must be met, soft preferences may go unmet
#requiredDuringSchedulingIgnoredDuringExecution 	must be satisfied
#preferredDuringSchedulingIgnoredDuringExecution	preferred; there can be several
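
Besides In, matchExpressions supports the operators NotIn, Exists, DoesNotExist, Gt and Lt; an illustrative fragment (not part of the lab files):

           - matchExpressions:
             - key: disktype
               operator: NotIn       #avoid nodes carrying these values
               values:
               - sata
             - key: kubernetes.io/hostname
               operator: Exists      #the key only has to be present; no values list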

[root@server2 ~]# kubectl label node server4 disktype=sata
node/server4 labeled
[root@server2 ~]# vim pod.yml 
[root@server2 ~]# cat pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity
spec:
  containers:
  - name: nginx
    image: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
           - matchExpressions:
             - key: disktype
               operator: In
               values:
                 - ssd
                 - sata
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: roles
            operator: In
            values:
            - nginx
[root@server2 ~]# kubectl label node server4 roles=nginx
node/server4 labeled
[root@server2 ~]# kubectl get node -L roles
NAME      STATUS   ROLES                  AGE    VERSION   ROLES
server2   Ready    control-plane,master   146m   v1.20.2   
server3   Ready    <none>                 88m    v1.20.2   
server4   Ready    <none>                 88m    v1.20.2   nginx
[root@server2 ~]# kubectl get node -L disktype
NAME      STATUS   ROLES                  AGE    VERSION   DISKTYPE
server2   Ready    control-plane,master   146m   v1.20.2   
server3   Ready    <none>                 89m    v1.20.2   ssd
server4   Ready    <none>                 88m    v1.20.2   sata
[root@server2 ~]# kubectl apply -f pod.yml 
pod/node-affinity created
[root@server2 ~]# kubectl get pod -o wide    #scheduled onto server4
NAME            READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
node-affinity   1/1     Running   0          10s   10.244.2.3   server4   <none>           <none>
2) Pod affinity: pods scheduled onto the same node
[root@server2 ~]# vim pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
---
apiVersion: v1
kind: Pod
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  containers:
  - name: myapp
    image: myapp:v1
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - nginx
        topologyKey: kubernetes.io/hostname

[root@server2 ~]# kubectl apply -f pod.yml 
pod/nginx created
pod/myapp created
[root@server2 ~]# kubectl get pod -o wide
NAME    READY   STATUS    RESTARTS   AGE   IP            NODE      NOMINATED NODE   READINESS GATES
myapp   1/1     Running   0          3s    10.244.1.12   server3   <none>           <none>
nginx   1/1     Running   0          3s    10.244.1.11   server3   <none>           <none>
3) Pod anti-affinity: pods kept on different nodes
[root@server2 ~]# vim pod.yml    #change podAffinity to:
    podAntiAffinity:
[root@server2 ~]# kubectl delete -f pod.yml --force
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "nginx" force deleted
pod "myapp" force deleted
[root@server2 ~]# kubectl apply -f pod.yml 
pod/nginx created
pod/myapp created
[root@server2 ~]# kubectl get pod -o wide
NAME    READY   STATUS              RESTARTS   AGE   IP       NODE      NOMINATED NODE   READINESS GATES
myapp   0/1     ContainerCreating   0          2s    <none>   server4   <none>           <none>
nginx   0/1     ContainerCreating   0          2s    <none>   server3   <none>           <none>


4. Tolerations

%taints and tolerations docs: https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/

#server2 carries a taint, so it is excluded from scheduling
[root@server2 ~]# kubectl describe nodes server2 |grep Taint
Taints:             node-role.kubernetes.io/master:NoSchedule

[root@server2 ~]# vim pod.yml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-server
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx

[root@server2 ~]# kubectl apply -f pod.yml    ###to remove the deployment: kubectl delete deployments.apps web-server
deployment.apps/web-server created
[root@server2 ~]# kubectl get pod -o wide
NAME                          READY   STATUS    RESTARTS   AGE   IP            NODE      NOMINATED NODE   READINESS GATES
web-server-6799fc88d8-7kf58   1/1     Running   0          2s    10.244.2.17   server4   <none>           <none>
web-server-6799fc88d8-vxxzd   1/1     Running   0          2s    10.244.2.16   server4   <none>           <none>
web-server-6799fc88d8-zfll5   1/1     Running   0          2s    10.244.1.19   server3   <none>           <none>
#taint server3; its pods are automatically evicted and recreated on server4
[root@server2 ~]# kubectl taint node server3 key1=v1:NoExecute
node/server3 tainted
[root@server2 ~]# kubectl get pod -o wide
NAME                          READY   STATUS    RESTARTS   AGE   IP            NODE      NOMINATED NODE   READINESS GATES
web-server-6799fc88d8-7kf58   1/1     Running   0          70s   10.244.2.17   server4   <none>           <none>
web-server-6799fc88d8-9xsjk   1/1     Running   0          27s   10.244.2.18   server4   <none>           <none>
web-server-6799fc88d8-vxxzd   1/1     Running   0          70s   10.244.2.16   server4   <none>


[root@server2 ~]# kubectl taint node server4 key2=v2:NoExecute
node/server4 tainted
[root@server2 ~]# kubectl get pod -o wide
NAME                          READY   STATUS    RESTARTS   AGE   IP       NODE     NOMINATED NODE   READINESS GATES
web-server-6799fc88d8-2vtpg   0/1     Pending   0          36s   <none>   <none>   <none>           <none>
web-server-6799fc88d8-5dm6d   0/1     Pending   0          36s   <none>   <none>   <none>           <none>
web-server-6799fc88d8-z2dnd   0/1     Pending   0          36s   <none>   <none>   <none>  

[root@server2 ~]# vim pod.yml    #operator: "Exists" tolerates all taints
#append to the pod template spec:
      tolerations:
      - operator: "Exists"


[root@server2 ~]# kubectl apply -f pod.yml 
[root@server2 ~]# kubectl get pod -o wide    #all nodes take part in scheduling again
[root@server2 ~]# kubectl taint node server3 key1-
node/server3 untainted
[root@server2 ~]# kubectl taint node server4 key2-
node/server4 untainted
  • Other commands that affect Pod scheduling: cordon, drain, delete
1) cordon: stop scheduling
[root@server2 ~]# kubectl cordon server3
node/server3 cordoned
[root@server2 ~]# kubectl get node
NAME      STATUS                     ROLES                  AGE     VERSION
server2   Ready                      control-plane,master   3h46m   v1.20.2
server3   Ready,SchedulingDisabled   <none>                 169m    v1.20.2
server4   Ready                      <none>                 169m    v1.20.2
2) drain: evict the node's pods to other nodes
[root@server2 ~]# kubectl drain server3
[root@server2 ~]# kubectl drain server3 --ignore-daemonsets
[root@server2 ~]# kubectl get node
NAME      STATUS                     ROLES                  AGE     VERSION
server2   Ready                      control-plane,master   3h46m   v1.20.2
server3   Ready,SchedulingDisabled   <none>                 169m    v1.20.2
server4   Ready                      <none>                 169m    v1.20.2
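
Both cordon and drain leave the node in SchedulingDisabled; to put it back into rotation:

kubectl uncordon server3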
3) delete: remove the node outright; to bring it back, restart kubelet on the node so it re-registers
[root@server2 ~]# kubectl delete node server3
node "server3" deleted
[root@server2 ~]# kubectl get node
NAME      STATUS   ROLES                  AGE     VERSION
server2   Ready    control-plane,master   3h50m   v1.20.2
server4   Ready    <none>  
[root@server3 ~]# systemctl restart kubelet


III. Kubernetes access control

1. Kubernetes API access control

#in the harbor web UI, create a private project named westos

1) Create a ServiceAccount
[root@server2 ~]# kubectl create secret docker-registry myregistrykey --docker-server=reg.westos.org --docker-username=admin --docker-password=westos --docker-email=yakexi007@westos.org    ##stores the docker registry credentials
[root@server2 ~]# kubectl create serviceaccount admin    ##create the serviceaccount
serviceaccount/admin created
[root@server2 ~]# kubectl get sa
NAME      SECRETS   AGE
admin     1         11s
default   1         4h21m
[root@server2 ~]# kubectl describe sa admin
Name:                admin
Namespace:           default
Labels:              <none>
Annotations:         <none>
Image pull secrets:  <none>
Mountable secrets:   admin-token-kvccw
Tokens:              admin-token-kvccw
Events:              <none>
[root@server2 ~]# kubectl patch serviceaccount admin -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'    ##attach the secret to the serviceaccount
serviceaccount/admin patched
[root@server2 ~]# kubectl describe sa admin
Name:                admin
Namespace:           default
Labels:              <none>
Annotations:         <none>
Image pull secrets:  myregistrykey
Mountable secrets:   admin-token-kvccw
Tokens:              admin-token-kvccw
Events:              <none>


[root@server2 ~]# vim pod.yml    ##bind the serviceaccount to the pod
apiVersion: v1
kind: Pod
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  containers:
  - name: myapp
    image: reg.westos.org/westos/game2048
  serviceAccountName: admin
[root@server2 ~]# kubectl apply -f pod.yml 
pod/myapp created
[root@server2 ~]# kubectl get pod
NAME                          READY   STATUS              RESTARTS   AGE
myapp                         1/1     Running             0          2s
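
Attaching the secret to a ServiceAccount makes every pod using that account inherit it; a single pod can also reference the secret directly, without touching any ServiceAccount (an equivalent sketch):

spec:
  containers:
  - name: myapp
    image: reg.westos.org/westos/game2048
  imagePullSecrets:
  - name: myregistrykey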

[root@server2 ~]# kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'    #attach the secret to the default serviceaccount instead
[root@server2 ~]# kubectl get sa
NAME      SECRETS   AGE
admin     1         15m
default   1         4h36m
[root@server2 ~]# kubectl get sa default
NAME      SECRETS   AGE
default   1         4h36m
[root@server2 ~]# kubectl describe sa default
Name:                default
Namespace:           default
Labels:              <none>
Annotations:         <none>
Image pull secrets:  myregistrykey
Mountable secrets:   default-token-77s8w
Tokens:              default-token-77s8w


[root@server2 ~]# vim pod.yml 
apiVersion: v1
kind: Pod
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  containers:
  - name: myapp
    image: reg.westos.org/westos/game2048

[root@server2 ~]# kubectl apply -f pod.yml 
[root@server2 ~]# kubectl get pod
NAME                          READY   STATUS              RESTARTS   AGE
myapp                         1/1     Running             0          2s
[root@server2 ~]# kubectl delete -f pod.yml
2) Create a UserAccount
[root@server2 ~]# cd /etc/kubernetes/pki/
[root@server2 pki]# openssl genrsa -out test.key 2048
[root@server2 pki]# openssl req -new -key test.key -out test.csr -subj "/CN=test"    #generate a CSR for user "test" (this and the signing step are omitted in the original transcript)
[root@server2 pki]# openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 365
[root@server2 pki]# openssl x509 -in test.crt -text -noout
[root@server2 pki]# kubectl config set-credentials test --client-certificate=/etc/kubernetes/pki/test.crt --client-key=/etc/kubernetes/pki/test.key --embed-certs=true
[root@server2 pki]# kubectl  config view
[root@server2 pki]# kubectl config set-context test@kubernetes --cluster=kubernetes --user=test
[root@server2 pki]# kubectl config use-context test@kubernetes
Switched to context "test@kubernetes".
[root@server2 pki]# kubectl get pod    #the user now authenticates, but has no permission on cluster resources yet; authorization comes next
Error from server (Forbidden): pods is forbidden: User "test" cannot list resource "pods" in API group "" in the namespace "default"


3) RBAC (Role Based Access Control): role-based authorization

RBAC lets administrators configure authorization policy dynamically through the Kubernetes API; a user's permissions come from the roles bound to that user.
RBAC only grants permissions and never denies them, so you only define what a user is allowed to do.
RBAC has four object types: Role, ClusterRole, RoleBinding, ClusterRoleBinding.

%1.Role
[root@server2 pki]# vim rbac.yaml
[root@server2 pki]# cat rbac.yaml 
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: myrole
rules:
- apiGroups: [""] 
  resources: ["pods"]
  verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
[root@server2 pki]# kubectl apply -f rbac.yaml 
role.rbac.authorization.k8s.io/myrole created
[root@server2 pki]# kubectl get role
NAME     CREATED AT
myrole   2021-03-20T09:40:03Z



%2.RoleBinding
[root@server2 pki]# kubectl delete -f rbac.yaml --force
[root@server2 pki]# vim rbac.yaml
#append to rbac.yaml:
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: test-read-pods
  namespace: default
subjects:
- kind: User
  name: test
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: myrole
  apiGroup: rbac.authorization.k8s.io

[root@server2 pki]# kubectl apply -f rbac.yaml 
role.rbac.authorization.k8s.io/myrole created
rolebinding.rbac.authorization.k8s.io/test-read-pods created
[root@server2 pki]# kubectl get role
NAME     CREATED AT
myrole   2021-03-20T09:42:13Z
[root@server2 pki]# kubectl get rolebinding.rbac.authorization.k8s.io
NAME             ROLE          AGE
test-read-pods   Role/myrole   47s
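
From the admin context, the grant can also be verified without switching users, via impersonation (a quick check, not in the original run):

kubectl auth can-i list pods --as test
#yes
kubectl auth can-i list deployments --as test
#no (myrole only covers pods)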
###now the test user can list pods
[root@server2 pki]# kubectl config use-context test@kubernetes
Switched to context "test@kubernetes".
[root@server2 pki]# kubectl get pod
NAME    READY   STATUS    RESTARTS   AGE
myapp   1/1     Running   0          6m7s
[root@server2 pki]# kubectl config use-context kubernetes-admin@kubernetes

%3.ClusterRole
[root@server2 pki]# kubectl delete -f rbac.yaml --force
[root@server2 pki]# vim rbac.yaml 
#append:
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: myclusterrole
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete", "create", "update"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

[root@server2 pki]# kubectl apply -f rbac.yaml 
##now delete/create/update also work
[root@server2 pki]# kubectl config use-context test@kubernetes
Switched to context "test@kubernetes".
[root@server2 pki]# kubectl get pod
NAME    READY   STATUS    RESTARTS   AGE
myapp   1/1     Running   0          9m36s
[root@server2 pki]# kubectl delete pod myapp

%4.RoleBinding
[root@server2 pki]# kubectl config use-context kubernetes-admin@kubernetes
[root@server2 pki]# kubectl delete -f rbac.yaml --force
[root@server2 pki]# vim rbac.yaml 
#append:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rolebind-myclusterrole
  namespace:  default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: myclusterrole
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: test
[root@server2 pki]# kubectl apply -f rbac.yaml 
[root@server2 ~]# kubectl apply -f pod.yml 
##test can now operate only in the default namespace; other namespaces stay forbidden
[root@server2 pki]# kubectl config use-context test@kubernetes
Switched to context "test@kubernetes".
[root@server2 pki]# kubectl get pod --namespace kube-system
Error from server (Forbidden): pods is forbidden: User "test" cannot list resource "pods" in API group "" in the namespace "kube-system"


%5.ClusterRoleBinding
[root@server2 pki]# kubectl config use-context kubernetes-admin@kubernetes
[root@server2 pki]# kubectl delete -f rbac.yaml --force
[root@server2 pki]# vim rbac.yaml 
#append the cluster-wide binding:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: clusterrolebinding-myclusterrole
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: myclusterrole
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: test
[root@server2 pki]# kubectl apply -f rbac.yaml 
[root@server2 pki]# kubectl config use-context test@kubernetes
##now test can view pods across the whole cluster
[root@server2 pki]# kubectl get pod --namespace kube-system