PV

集群管理员将配置好的那些存储空间引入至K8S集群中,定义成PV (Persistent Volume,持久化卷)

PVC

K8S用户在创建Pod时如果要用到持久化存储,必须先创建PVC(在K8S集群中找一个能符合条件的存储卷PV来用)。注意:PV和PVC是一一对应关系,一旦某个PV被一个PVC占用,那么这个PV就不能再被其他PVC占用,被占用的PV的状态会显示为Bound。PVC创建以后,就相当于一个存储卷,可以被多个 Pod所使用。

PV保留策略有两种:

retain:删除pod,删除pvc,pv状态变为released,不可复用(pv状态必须available才可复用)

优点:防止误删数据

缺点:需要手工清理无用数据,忘记的话,占用存储空间

delete(动态制备):

优点:删除pod,删除pvc的时候,会自动把pv和底层数据删除,节省空间

缺点:容易误删数据

动态卷流程(默认策略delete)

1. 准备NFS服务器

2. 获取NFS 插件文件,上传并解压

[root@master ~]# tar -zxvf 11-nfs-subdir-external-provisioner.tar.gz

1. 设置RBAC权限

Role Based Access Control 基于角色访问控制

rbac.yaml文件是规定角色对应的模块具有哪些权限。默认使用的ns default,我的环境使用的ns是memeda

sed -i 's/namespace: default/namespace: memeda/g' rbac.yaml
[root@master deploy]# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created

2. 修改NFS配置文件

[root@master deploy]# vim deployment.yaml
[root@master deploy]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: memeda
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/cloudcs/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.44.159
            - name: NFS_PATH
              value: /share_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.44.159
            path: /share_data


[root@master deploy]# kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
[root@master deploy]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-6cfc5f7988-9hw5h   1/1     Running   0          14s

3. 创建存储类

必须的,存储类默认使用的保留策略就是delete

[root@master deploy]# kubectl apply -f class.yaml
storageclass.storage.k8s.io/nfs-client created
[root@master deploy]# kubectl get sc
NAME         PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   k8s-sigs.io/nfs-subdir-external-provisioner   Delete          Immediate           false                  2s


4. 创建PVC

不需要创建pv的,pv会随pvc自动创建出来的

[root@master deploy]# vim test-claim.yaml
[root@master deploy]# cat test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi


[root@master deploy]# kubectl apply -f test-claim.yaml

persistentvolumeclaim/test-claim created

[root@master deploy]# kubectl get pvc

NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE

test-claim   Bound    pvc-463b03bc-6c60-4e3f-abe1-e1a98e70b52d   5Gi        RWX            nfs-client     <unset>                 3s

[root@master deploy]#

[root@master deploy]# kubectl get pv

NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE

pvc-463b03bc-6c60-4e3f-abe1-e1a98e70b52d   5Gi        RWX            Delete           Bound    memeda/test-claim   nfs-client     <unset>                          17s


5. 创建pod


[root@master deploy]# vim test-pod.yaml
[root@master deploy]# cat test-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: uhub.service.ucloud.cn/iecloud.ljh/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

[root@master deploy]# kubectl apply -f test-pod.yaml

[root@master deploy]# kubectl exec -ti test-pod -- bash
root@test-pod:/# cd /mnt/
root@test-pod:/mnt# ls
SUCCESS
root@test-pod:/mnt# touch HEHEHE
root@test-pod:/mnt# ls
HEHEHE  SUCCESS

[root@nfs ~]# ls /share_data/memeda-test-claim-pvc-463b03bc-6c60-4e3f-abe1-e1a98e70b52d/
HEHEHE  SUCCESS


删除pod,删除pvc,观察是否自动删除pv和底层数据

[root@master deploy]# kubectl delete pvc test-claim
persistentvolumeclaim "test-claim" deleted
[root@master deploy]# kubectl get pvc
No resources found in memeda namespace.
[root@master deploy]# kubectl get pv
No resources found
[root@nfs ~]# ls /share_data/memeda-test-claim-pvc-463b03bc-6c60-4e3f-abe1-e1a98e70b52d/
ls: cannot access '/share_data/memeda-test-claim-pvc-463b03bc-6c60-4e3f-abe1-e1a98e70b52d/': No such file or directory
[root@nfs ~]# ls /share_data/
111  a1.txt  a2.txt  a3.txt  b1.txt  b2.txt  b3.txt  c1.txt  c2.txt  c3.txt

Deployment

无角色 前端web网站

一种控制器,在华为云CCE里面:无状态工作负载

deployment 无状态工作负载(dep控制器)

statefulset 有状态工作负载(sta控制器)https://blog.csdn.net/m0_60259116/article/details/140228620(案例mysql主备)

声明式文本:类似于yaml文件,你只需要告诉它最终实现什么结果即可,具体中间底层怎么实现,由集群自己来管理。

命令式文本:类似于编程 c/c++/java,每一步都要告诉程序,下一步怎么执行。

修改副本(三种方式):

deploy可通过yaml或命令行来创建。

1.17及之前kubectl run 名称,这个命令默认创建的是deploy

1.17之后,kubectl run 命令,这个命令默认创建的是pod


[root@master ~]# kubectl create deployment web --image uhub.service.ucloud.cn/iecloud.ljh/nginx --dry-run=client -o yaml > web.yaml
[root@master ~]# vim web.yaml
[root@master ~]# cat web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: uhub.service.ucloud.cn/iecloud.ljh/nginx
        name: nginx
        resources: {}
status: {}

K8s基本概念(kubernetes)_kubernetes

[root@master ~]# kubectl create deployment web --image uhub.service.ucloud.cn/iecloud.ljh/nginx --dry-run=client -o yaml > web.yaml
[root@master ~]# vim web.yaml
[root@master ~]# cat web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: uhub.service.ucloud.cn/iecloud.ljh/nginx
        name: nginx
        resources: {}
status: {}
[root@master ~]# kubectl apply -f web.yaml
deployment.apps/web created
[root@master ~]# kubectl get pod
NAME                   READY   STATUS    RESTARTS   AGE
web-55685895c7-nk725   1/1     Running   0          3s


副本数修改

在线edit

[root@master ~]# kubectl edit deployments.apps web


命令行scale

[root@master ~]# kubectl scale deployment web --replicas 10


修改yaml文件

[root@master ~]# vim web.yaml
[root@master ~]# kubectl apply -f web.yaml
deployment.apps/web configured

 HPA动态伸缩:

手工修改很是麻烦,因为你不知道流量什么时候暴增,什么时候骤减。

K8s基本概念(kubernetes)_kubernetes_02

创建HPA

[root@master ~]# kubectl autoscale deployment web --min 3 --max 10

horizontalpodautoscaler.autoscaling/web autoscaled

[root@master ~]# kubectl get hpa

NAME   REFERENCE        TARGETS              MINPODS   MAXPODS   REPLICAS   AGE

web    Deployment/web   cpu: <unknown>/80%   3         10        0          2s


默认没有指定容器使用cpu计算资源的额度。

[root@kmaster ~]# kubectl edit deployments.apps dep1


    spec:

      containers:

      - image: nginx

        imagePullPolicy: IfNotPresent

        name: nginx

        resources: {}

-----------------------------------------

    spec:

      containers:

      - image: nginx

        imagePullPolicy: IfNotPresent

        name: nginx

        resources:

          requests:

            cpu: 500m


[root@master ~]# kubectl edit deployments.apps web
deployment.apps/web edited
[root@master ~]# kubectl autoscale deployment web --min 3 --max 10 --cpu-percent 15
horizontalpodautoscaler.autoscaling/web autoscaled

[root@master ~]# kubectl get hpa

NAME   REFERENCE        TARGETS       MINPODS   MAXPODS   REPLICAS   AGE

web    Deployment/web   cpu: 0%/15%   3         10        3          17s

模拟外部访问压力测试

[root@master ~]# kubectl expose deployment web --port=80 --target-port=80 --type=NodePort
service/web exposed
[root@master ~]# kubectl get svc
NAME   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
web    NodePort   10.108.142.223   <none>        80:30382/TCP   5s
[root@master ~]# ls /mnt/
hgfs
[root@master ~]# mount /dev/cdrom /mnt/
mount: /mnt: WARNING: device write-protected, mounted read-only.
[root@master ~]# ls /mnt/
AppStream  BaseOS  EFI  images  isolinux  LICENSE  media.repo  TRANS.TBL
[root@master ~]# yum install -y httpd-tools.x86_64


安装ab工具,ab是apachebench命令的缩写,ab是apache自带的压力测试工具。
[root@kmaster ~]# ab -h
[root@kmaster ~]# ab -t 600 -n 1000000 -c 1000 http://192.168.44.100:30382/index.html
This is ApacheBench, Version 2.3 <$Revision: 1843412 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/

Benchmarking 192.168.100.146 (be patient)
Completed 100000 requests
Completed 200000 requests
Completed 300000 requests
Completed 400000 requests
Completed 500000 requests


等待,观察top情况及pod数量。


[root@master ~]# kubectl get hpa

NAME   REFERENCE        TARGETS        MINPODS   MAXPODS   REPLICAS   AGE

web    Deployment/web   cpu: 14%/15%   3         10        7          9m12s



[root@master ~]# kubectl get hpa

NAME   REFERENCE        TARGETS       MINPODS   MAXPODS   REPLICAS   AGE

web    Deployment/web   cpu: 0%/15%   3         10        3          15m

镜像滚动更新

1.通过edit修改

K8s基本概念(kubernetes)_kubernetes_03

2.通过命令行修改

[root@master ~]# kubectl set image deploy web nginx=uhub.service.ucloud.cn/iecloud.ljh/nginx
deployment.apps/web image updated

1. 通过yaml文件

修改文件后,通过apply应用即可


其他控制器


statefulset有状态工作负载

1. 创建服务(无头服务,本质还是svc)  一些后端服务,数据库啊什么什么的

[root@master ~]# vim stasvc.yaml
[root@master ~]# cat stasvc.yaml
apiVersion: v1
kind: Service       # 对象类型为Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - name: nginx     # Pod间通信的端口名称
      port: 80        # Pod间通信的端口号
  selector:
    app: nginx        # 选择标签为app:nginx的Pod
  clusterIP: None     # 必须设置为None,表示Headless Service
[root@master ~]# kubectl get svc
NAME   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
web    NodePort   10.108.142.223   <none>        80:30382/TCP   3h32m
[root@master ~]#
[root@master ~]# kubectl apply -f stasvc.yaml
service/nginx created
[root@master ~]# kubectl get svc
NAME    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
nginx   ClusterIP   None             <none>        80/TCP         1s

2. 配置动态卷(delete)

创建NFS控制器

kubectl apply -f deployment.yaml

授权

kubectl apply -f rbac.yaml

创建存储类

kubectl apply -f class.yaml

创建pvc

kubectl apply -f test-claim.yaml

K8s基本概念(kubernetes)_kubernetes_04

3. 创建statefulset

[root@master ~]# cat sta.yaml

apiVersion: apps/v1

kind: StatefulSet

metadata:

  name: nginx

spec:

  serviceName: nginx                             # headless service的名称

  replicas: 3

  selector:

    matchLabels:

      app: nginx

  template:

    metadata:

      labels:

        app: nginx

    spec:

      containers:

        - name: container-0

          image: uhub.service.ucloud.cn/iecloud.ljh/nginx

          resources:

            limits:

              cpu: 100m

              memory: 200Mi

            requests:

              cpu: 100m

              memory: 200Mi

          volumeMounts:                           # Pod挂载的存储

          - name:  data

            mountPath:  /usr/share/nginx/html     # 存储挂载到/usr/share/nginx/html

  volumeClaimTemplates:

  - metadata:

      name: data

    spec:

      accessModes:

      - ReadWriteMany

      resources:

        requests:

          storage: 1Gi

      storageClassName: nfs-client                   # 持久化存储的类型


4. 查看

[root@master ~]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-6cfc5f7988-86l2n   1/1     Running   0          7m27s
nginx-0                                   1/1     Running   0          5m34s
nginx-1                                   1/1     Running   0          5m31s
nginx-2                                   1/1     Running   0          5m28s

K8s基本概念(kubernetes)_kubernetes_05

[root@master ~]# kubectl run -i --tty --image ccr.ccs.tencentyun.com/huanghuanhui/jessie-dnsutils:1.3 dnsutils --restart=Never --rm /bin/sh
If you don't see a command prompt, try pressing enter.

# nslookup nginx-0.nginx
Server:         10.96.0.10
Address:        10.96.0.10#53

Name:   nginx-0.nginx.memeda.svc.cluster.local
Address: 10.244.104.45

#
# nslookup nginx-1.nginx
Server:         10.96.0.10
Address:        10.96.0.10#53

Name:   nginx-1.nginx.memeda.svc.cluster.local
Address: 10.244.166.136

# nslookup nginx-2.nginx
Server:         10.96.0.10
Address:        10.96.0.10#53

Name:   nginx-2.nginx.memeda.svc.cluster.local
Address: 10.244.104.48

这时候,可以随机删除pod


# nslookup nginx-1.nginx
Server:         10.96.0.10
Address:        10.96.0.10#53

Name:   nginx-1.nginx.memeda.svc.cluster.local
Address: 10.244.166.137

观察存储状态

[root@master ~]# kubectl exec -ti nginx-0 -- bash
root@nginx-0:/# cd /usr/share/nginx/html/
root@nginx-0:/usr/share/nginx/html# ls
root@nginx-0:/usr/share/nginx/html#
root@nginx-0:/usr/share/nginx/html# echo 1111 > index.html

[root@master ~]# kubectl exec -ti nginx-0 -- curl localhost
1111

[root@master ~]# kubectl exec -ti nginx-1 -- bash
root@nginx-1:/# echo 2222 > /usr/share/nginx/html/index.html
root@nginx-1:/# exit
exit
[root@master ~]# kubectl exec -ti nginx-1 -- curl localhost
2222

底层存储也对应有

K8s基本概念(kubernetes)_kubernetes_06

这时候,删除任意一个pod,都会创建同名pod,加载对应的pvc,pod对应的数据也不会变。

所以statefulset的有状态就体现在这里,各是各的,有角色和网络id之分,使用的底层存储也不一样。

daemonset

守护进程集   (无副本机制)

DaemonSet(守护进程集)在集群的每个节点上运行一个Pod,且保证只有一个Pod,非常适合一些系统层面的应用,例如日志收集、资源监控等,这类应用需要每个节点都运行,且不需要太多实例,一个比较好的例子就是Kubernetes的kube-proxy。

DaemonSet跟节点相关,如果节点异常,也不会在其他节点重新创建。

[root@master ~]# vim ds1.yaml
[root@master ~]# cat ds1.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app: ds1
  name: ds1
spec:
  selector:
    matchLabels:
      app: ds1
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: ds1
    spec:
      containers:
      - command:
        - sh
        - -c
        - sleep 36000
        image: ccr.ccs.tencentyun.com/tencentyun/busybox
        name: busybox
        resources: {}

[root@master ~]# kubectl apply -f ds1.yaml
[root@master ~]# kubectl get pod
NAME                                      READY   STATUS             RESTARTS       AGE
dnsutils                                  0/1     Completed          0              45m
ds1-8hdzl                                 1/1     Running            0              6s
ds1-xlmnb                                 1/1     Running            0              6s

master上有污点。

service

pod创建好之后,怎么访问?

[root@master ~]# vim pod1.yaml

[root@master ~]# cat pod1.yaml

apiVersion: v1

kind: Pod

metadata:

  creationTimestamp: null

  labels:

    run: pod1

  name: pod1

spec:

  containers:

  - image: uhub.service.ucloud.cn/iecloud.ljh/nginx

    name: pod1

    ports:

    - containerPort: 80

      hostPort: 5000

    resources: {}

  dnsPolicy: ClusterFirst

  restartPolicy: Always

status: {}

[root@master ~]# kubectl apply -f pod1.yaml

基本的东西要记住:

ssh 22

http 80

https(SSL)443

mysql 3306

apache (httpd)网站根目录:/var/www/html

nginx网站根目录:/usr/share/nginx/html


确定pod1是在哪个节点上运行的

K8s基本概念(kubernetes)_kubernetes_07

[root@master ~]# kubectl exec -ti pod1 -- bash
root@pod1:/# cd /usr/share/nginx/html/
root@pod1:/usr/share/nginx/html# ls
50x.html  index.html
root@pod1:/usr/share/nginx/html# echo 111111111 > index.html

K8s基本概念(kubernetes)_kubernetes_08

现在问题是:如果pod太多,对每个pod都设置一个端口,就会导致安全问题,也不方便去管理。这时候k8s引入了一个资源对象,叫Service(SVC)

SVC就是一个负载均衡器。


创建一个SVC

[root@master ~]# cat web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: swr.cn-north-4.myhuaweicloud.com/tianmeili/nginx:1.0
        name: nginx
        resources: {}
status: {}
[root@master ~]#
[root@master ~]#
[root@master ~]# kubectl apply -f web.yaml

[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-bjpn2 -- bash
root@web-5c7d8bcc5d-bjpn2:/# echo 11111 > /usr/share/nginx/html/index.html
root@web-5c7d8bcc5d-bjpn2:/# exit
exit
[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-n97hj -- bash
root@web-5c7d8bcc5d-n97hj:/# echo 2222 > /usr/share/nginx/html/index.html
root@web-5c7d8bcc5d-n97hj:/# exit
exit
[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-rrc5h -- bash
root@web-5c7d8bcc5d-rrc5h:/# echo 3333 > /usr/share/nginx/html/index.html
root@web-5c7d8bcc5d-rrc5h:/# exit
exit


为这个deploy创建一个svc

[root@master ~]# kubectl expose deployment web --name svc1 --port 80

service/svc1 exposed

[root@master ~]# kubectl get svc

NAME    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE

nginx   ClusterIP   None            <none>        80/TCP    18h

svc1    ClusterIP   10.99.216.224   <none>        80/TCP    3s


注意这个clusterIP地址,集群内部使用的ip地址,只能集群内部去访问。

K8s基本概念(kubernetes)_kubernetes_09

测试访问,观察负载均衡

[root@master ~]# curl 10.99.216.224
11111
[root@master ~]# curl 10.99.216.224
2222
[root@master ~]# curl 10.99.216.224
2222
[root@master ~]# curl 10.99.216.224
3333
[root@master ~]# curl 10.99.216.224
2222
[root@master ~]# curl 10.99.216.224
3333
[root@master ~]# curl 10.99.216.224
11111
[root@master ~]# curl 10.99.216.224
2222


SVC是怎么知道把流量分发到后端哪些pod呢?

[root@master ~]# kubectl get svc -o wide

NAME    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE     SELECTOR

nginx   ClusterIP   None            <none>        80/TCP    18h     app=nginx

svc1    ClusterIP   10.99.216.224   <none>        80/TCP    5m49s   app=web

SVC是通过SELECTOR这个字段里面显示的标签进行匹配的。而不是通过SVC本身自己的标签匹配的。

[root@master ~]# kubectl edit svc svc1

K8s基本概念(kubernetes)_kubernetes_10

如果SVC存在多个重复的匹配标签,只有最后一个会生效。

服务的发现

K8s基本概念(kubernetes)_kubernetes_11

一共有3种方式

ClusterIP方式

[root@master ~]# kubectl run db --image uhub.service.ucloud.cn/iecloud.ljh/mysql  --env MYSQL_ROOT_PASSWORD=redhat --env MYSQL_DATABASE=wordpress --dry-run=client -o yaml > db.yaml
[root@master ~]# vim db.yaml
[root@master ~]# cat db.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: db
  name: db
spec:
  containers:
  - env:
    - name: MYSQL_ROOT_PASSWORD
      value: redhat
    - name: MYSQL_DATABASE
      value: wordpress
    image: uhub.service.ucloud.cn/iecloud.ljh/mysql 
    name: db
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}


[root@master ~]# kubectl apply -f db.yaml

[root@master ~]# kubectl get pod
NAME                                      READY   STATUS      RESTARTS      AGE
db                                        1/1     Running     0             82s

为mysql创建svc

[root@master ~]# kubectl expose pod db --name dbsvc --port 3306 --target-port 3306 (默认svc类型就是ClusterIP)

service/dbsvc exposed

[root@master ~]# kubectl get svc

NAME    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE

dbsvc   ClusterIP   10.102.176.113   <none>        3306/TCP   2s


创建博客,直接使用clusterip即可

[root@master ~]# kubectl run blog --image uhub.service.ucloud.cn/iecloud.ljh/wordpress:latest --env WORDPRESS_DB_HOST=10.102.176.113 --env WORDPRESS_DB_USER=root --env WORDPRESS_DB_PASSWORD=redhat --env WORDPRESS_DB_NAME=wordpress --dry-run=client -o yaml > blog.yaml
[root@master ~]# cat blog.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: blog
  name: blog
spec:
  containers:
  - env:
    - name: WORDPRESS_DB_HOST
      value: 10.102.176.113
    - name: WORDPRESS_DB_USER
      value: root
    - name: WORDPRESS_DB_PASSWORD
      value: redhat
    - name: WORDPRESS_DB_NAME
      value: wordpress
    image: uhub.service.ucloud.cn/iecloud.ljh/wordpress:latest
    name: blog
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@master ~]# kubectl apply -f blog.yaml
[root@master ~]# kubectl get pod blog -o wide
NAME   READY   STATUS    RESTARTS   AGE    IP              NODE    NOMINATED NODE   READINESS GATES
blog   1/1     Running   0          115s   10.244.104.11   node2   <none>           <none>


为blog创建svc(类型NodePort)

[root@master ~]# kubectl expose pod blog --name blogsvc --port 80 --target-port 80 --type NodePort

service/blogsvc exposed

[root@master ~]# kubectl get svc

NAME      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE

blogsvc   NodePort    10.99.124.244    <none>        80:32503/TCP   3s


打开浏览器,输入任意节点ip加端口号32503即可

K8s基本概念(kubernetes)_kubernetes_12

变量方式

当你在集群环境中,创建一个pod,那么这个pod里面就包含了当前集群中所有svc的变量信息。

[root@master ~]# vim blog.yaml
[root@master ~]# cat blog.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: blog
  name: blog
spec:
  containers:
  - env:
    - name: WORDPRESS_DB_HOST
      value: $(DBSVC_SERVICE_HOST)
    - name: WORDPRESS_DB_USER
      value: root
    - name: WORDPRESS_DB_PASSWORD
      value: redhat
    - name: WORDPRESS_DB_NAME
      value: wordpress
    image: uhub.service.ucloud.cn/iecloud.ljh/wordpress:latest
    name: blog
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

DNS方式(推荐这种方式)

直接指定svc的名字即可。

[root@master ~]# vim blog.yaml
[root@master ~]# cat blog.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: blog
  name: blog
spec:
  containers:
  - env:
    - name: WORDPRESS_DB_HOST
      value: dbsvc
    - name: WORDPRESS_DB_USER
      value: root
    - name: WORDPRESS_DB_PASSWORD
      value: redhat
    - name: WORDPRESS_DB_NAME
      value: wordpress
    image: uhub.service.ucloud.cn/iecloud.ljh/wordpress:latest
    name: blog
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@master ~]# kubectl apply -f blog.yaml
pod/blog created
[root@master ~]# kubectl get pod
NAME                                      READY   STATUS      RESTARTS       AGE
blog                                      1/1     Running     0              3s


dns原理

当创建blog的时候,发现blog里面指定的数据库的主机是一个svc(名字),blog是如何通过这个名字找到对应的ip地址呢?

[root@master ~]# kubectl exec -ti blog -- bash
root@blog:/var/www/html# cat /etc/resolv.conf
search memeda.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.96.0.10
options ndots:5
root@blog:/var/www/html#

你去找nameserver 10.96.0.10,它知道。

这个10.96.0.10是谁?它是一个SVC的ip地址。

[root@master ~]# kubectl get svc -n kube-system

NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE

kube-dns         ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   2d19h


根据之前学过的selector标签,可以确定这个svc管理哪些pod的

[root@master ~]# kubectl get svc -o wide -n kube-system

NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE     SELECTOR

kube-dns         ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   2d19h   k8s-app=kube-dns


哪些pod是这个标签呢?

[root@master ~]# kubectl get pod -l k8s-app=kube-dns -n kube-system

NAME                       READY   STATUS    RESTARTS       AGE

coredns-7b5944fdcf-6wdgd   1/1     Running   3 (108m ago)   2d19h

coredns-7b5944fdcf-tcsvq   1/1     Running   3 (108m ago)   2d19h


这俩cordns的pod就是集群内的dns解析服务器。

每创建一个svc,svc都会把自己往dns服务器上去注册,名字和ip地址都会记录到dns服务里面。


这里面有个小细节:我们svc和pod都是在memeda命名空间里面,如果我在default命名空间中,创建blog和blogsvc,那么通过memeda命名空间的(dbsvc)能否访问呢?

[root@master ~]# kubens default
Context "kubernetes-admin@kubernetes" modified.
Active namespace is "default".

[root@master ~]# vim blog.yaml
[root@master ~]# cat blog.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: blog
  name: blog
spec:
  containers:
  - env:
    - name: WORDPRESS_DB_HOST
      value: dbsvc.memeda
    - name: WORDPRESS_DB_USER
      value: root
    - name: WORDPRESS_DB_PASSWORD
      value: redhat
    - name: WORDPRESS_DB_NAME
      value: wordpress
    image: uhub.service.ucloud.cn/iecloud.ljh/wordpress:latest
    name: blog
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

[root@master ~]# kubectl expose pod blog --name svc01 --port 80 --target-port 80 --type NodePort

[root@master ~]# kubectl get svc

NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE

kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        2d19h

svc01        NodePort    10.97.21.101   <none>        80:30829/TCP   4s

K8s基本概念(kubernetes)_kubernetes_13

dns可以跨命名空间进行svc的访问(使用"svc名.命名空间"的形式);变量方式只会注入同命名空间的svc信息,无法跨命名空间使用;clusterIP地址本身在集群内各命名空间都可路由,但硬编码IP不便维护,推荐DNS方式。

服务的发布

NodePort

[root@master ~]# cat web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: swr.cn-north-4.myhuaweicloud.com/tianmeili/nginx:1.0
        name: nginx
        resources: {}
status: {}

[root@master ~]# kubectl apply -f web.yaml

[root@master ~]# kubectl expose deployment web --name websvc --port 80 --target-port 80 --type NodePort

[root@master ~]# kubectl get svc
NAME      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
blogsvc   NodePort    10.99.124.244    <none>        80:32503/TCP   3h23m
dbsvc     ClusterIP   10.102.176.113   <none>        3306/TCP       3h33m
nginx     ClusterIP   None             <none>        80/TCP         23h
svc1      ClusterIP   10.99.216.224    <none>        80/TCP         4h33m
websvc    NodePort    10.104.60.12     <none>        80:30692/TCP   8s

http://192.168.44.100:30692/

http://192.168.44.101:30692/

http://192.168.44.102:30692/


Ingress

Service是基于四层TCP和UDP协议转发的,而Ingress可以基于七层的HTTP和HTTPS协议转发,可以通过域名和路径做到更细粒度的划分,如下图所示。


Ingress-Service

K8s基本概念(kubernetes)_kubernetes_14

1. 创建3个pod

[root@master ~]# kubectl run pod1 --image registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888 --image-pull-policy Never
[root@master ~]# kubectl run pod2 --image registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888 --image-pull-policy Never
[root@master ~]# kubectl run pod3 --image registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888 --image-pull-policy Never

[root@master ~]# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
pod1   1/1     Running   0          14s
pod2   1/1     Running   0          8s
pod3   1/1     Running   0          3s


2. 创建svc

[root@master ~]# kubectl expose pod pod1 --name svc1 --port 80 --target-port 80
[root@master ~]# kubectl expose pod pod2 --name svc2 --port 80 --target-port 80
[root@master ~]#
[root@master ~]# kubectl expose pod pod3 --name svc3 --port 80 --target-port 80

[root@master ~]# kubectl get svc
NAME   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
svc1   ClusterIP   10.105.36.198   <none>        80/TCP    17s
svc2   ClusterIP   10.104.7.149    <none>        80/TCP    11s
svc3   ClusterIP   10.106.170.56   <none>        80/TCP    4s


3. 修改网站内容

[root@master ~]# kubectl exec -ti pod1 -- sh -c 'echo 1111 > /usr/share/nginx/html/index.html'
[root@master ~]# kubectl exec -ti pod2 -- sh -c 'echo 2222 > /usr/share/nginx/html/index.html'
[root@master ~]# kubectl exec -ti pod3 -- sh -c 'echo 3333 > /usr/share/nginx/html/index.html'

4. ingress控制器

官方有帮我们做好,直接下载对应的镜像,运行yaml文件,它就会帮我们把所有的环境搭建好。

[root@master ~]# kubectl apply -f deploy.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
serviceaccount/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
configmap/ingress-nginx-controller created
service/ingress-nginx-controller created
service/ingress-nginx-controller-admission created
deployment.apps/ingress-nginx-controller created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created

[root@master ~]# kubectl get deployments.apps -n ingress-nginx
NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
ingress-nginx-controller   1/1     1            1           4m33s

[root@master ~]# kubectl get pod -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-kwwfk        0/1     Completed   0          2m26s
ingress-nginx-admission-patch-cm7ps         0/1     Completed   2          2m26s

ingress-nginx-controller-77d8b9c46b-zzsmv   1/1     Running     0          2m26s


5. 创建NodePort SVC

因为默认本应该使用LoadBalancer类型的EXTERNAL-IP地址去访问,可是我们当前环境并没有配置LoadBalancer,所以单独创建一个NodePort类型的SVC来使用即可。

[root@master ~]# kubens ingress-nginx
Context "kubernetes-admin@kubernetes" modified.
Active namespace is "ingress-nginx".
[root@master ~]# kubectl expose deploy ingress-nginx-controller --name svcinss --type NodePort
service/svcinss exposed
[root@master ~]# kubectl get svc
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                                     AGE
ingress-nginx-controller             LoadBalancer   10.97.53.69      <pending>     80:32366/TCP,443:32390/TCP                  8m49s
ingress-nginx-controller-admission   ClusterIP      10.107.176.122   <none>        443/TCP                                     8m49s
svcinss                              NodePort       10.99.249.26     <none>        80:30162/TCP,443:32443/TCP,8443:32751/TCP   2s

[root@master ~]# kubens abc
Context "kubernetes-admin@kubernetes" modified.
Active namespace is "abc".
[root@master ~]#

6. 创建ingress类

这个类其实已经存在了,已经帮我们创建好了。这个类管理和操作路由规则的,让这些规则生效的。

[root@master ~]# kubectl edit ingressclasses.networking.k8s.io
ingressclass.networking.k8s.io/nginx edited

添加ingressclass一行

  annotations:

    ingressclass.kubernetes.io/is-default-class: "true"

    kubectl.kubernetes.io/last-applied-configuration: |


7. 创建路由规则

[root@master ~]# vim rouinss.yaml
[root@master ~]# cat rouinss.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-wildcard-host
spec:
  rules:
  - host: "www.keji.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: svc1
            port:
              number: 80
  - host: "www.jiaoyu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: svc2
            port:
              number: 80
  - host: "www.qiche.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: svc3
            port:
              number: 80
[root@master ~]# kubectl get ingress
NAME                    CLASS   HOSTS                                       ADDRESS   PORTS   AGE
ingress-wildcard-host   nginx   www.keji.com,www.jiaoyu.com,www.qiche.com             80      27s
[root@master ~]#
[root@master ~]# kubectl describe ingress ingress-wildcard-host
Name:             ingress-wildcard-host
Labels:           <none>
Namespace:        abc
Address:
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host            Path  Backends
  ----            ----  --------
  www.keji.com
                  /   svc1:80 (10.244.104.25:80)
  www.jiaoyu.com
                  /   svc2:80 (10.244.104.14:80)
  www.qiche.com
                  /   svc3:80 (10.244.104.18:80)
Annotations:      <none>
Events:
  Type    Reason  Age   From                      Message

  ----    ------  ----  ----                      -------

  Normal  Sync    46s   nginx-ingress-controller  Scheduled for sync


测试

K8s基本概念(kubernetes)_kubernetes_15

secret和configmap

secret

涉及一些变量参数值、密码值等敏感信息时,特别适合用 secret 进行封装。

将你这些值封装成一个secret(k8s里面的一个对象资源),未来创建mysql的时候需要调用加载密码,直接调用secret里面的值即可。


[root@master ~]# kubectl create secret generic --help |grep from

    --from-env-file=[]:

    --from-file=[]:

    --from-literal=[]:

[root@master ~]# kubectl create secret generic sec01 --from-literal=aaa=111 --from-literal=MYSQL_ROOT_PASSWORD=redhat

secret/sec01 created

[root@master ~]# kubectl get secrets

NAME    TYPE     DATA   AGE

sec01   Opaque   2      4s


[root@master ~]# echo 11111 > index.html

[root@master ~]#

[root@master ~]# ls index.html

index.html

[root@master ~]# cat index.html

11111


[root@master ~]# kubectl create secret generic sec02 --from-file=index.html

[root@master ~]# kubectl get secrets

NAME    TYPE     DATA   AGE

sec01   Opaque   2      6m38s

sec02   Opaque   1      13s


[root@master ~]# vim name.txt

[root@master ~]# cat name.txt

name1=zhangsan

name2=lisi

name3=wangwu

MYSQL_ROOT_PASSWORD=redhat

[root@master ~]#

[root@master ~]# kubectl create secret generic sec03 --from-env-file=name.txt

secret/sec03 created

[root@master ~]# kubectl get secrets

NAME    TYPE     DATA   AGE

sec01   Opaque   2      8m30s

sec02   Opaque   1      2m5s

sec03   Opaque   4      5s


使用secret创建mysql容器

[root@master ~]# kubectl create secret generic sec01 --from-literal=dbpass=redhat

secret/sec01 created

[root@master ~]# kubectl get secrets

NAME    TYPE     DATA   AGE

sec01   Opaque   1      3s


[root@master ~]# vim db.yaml

[root@master ~]# cat db.yaml

apiVersion: v1

kind: Pod

metadata:

  creationTimestamp: null

  labels:

    run: db

  name: db

spec:

  containers:

  - env:

    - name: MYSQL_ROOT_PASSWORD

      valueFrom:

        secretKeyRef:

          name: sec01

          key: dbpass

    image: registry.cn-hangzhou.aliyuncs.com/cloudcs/mysql:888

    imagePullPolicy: IfNotPresent

    name: db

    resources: {}

  dnsPolicy: ClusterFirst

  restartPolicy: Always

status: {}


[root@master ~]# kubectl apply -f db.yaml

pod/db created

[root@master ~]# kubectl get pod

NAME   READY   STATUS    RESTARTS   AGE

db     1/1     Running   0          2s

pod1   1/1     Running   0          101m

pod2   1/1     Running   0          101m

pod3   1/1     Running   0          101m


[root@master ~]# kubectl get pod -o wide

NAME   READY   STATUS    RESTARTS   AGE    IP              NODE    NOMINATED NODE   READINESS GATES

db     1/1     Running   0          35s    10.244.104.16   node2   <none>           <none>

pod1   1/1     Running   0          101m   10.244.104.25   node2   <none>           <none>

pod2   1/1     Running   0          101m   10.244.104.14   node2   <none>           <none>

pod3   1/1     Running   0          101m   10.244.104.18   node2   <none>           <none>

[root@master ~]# mysql -uroot -predhat -h 10.244.104.16

Welcome to the MariaDB monitor.  Commands end with ; or \g.

Your MySQL connection id is 8

Server version: 8.0.27 MySQL Community Server - GPL

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MySQL [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| sys                |

+--------------------+

4 rows in set (0.003 sec)

MySQL [(none)]>

加载多个参数

[root@master ~]# kubectl create secret generic sec02 --from-literal dbname=wordpress

[root@master ~]# cat db.yaml

apiVersion: v1

kind: Pod

metadata:

  creationTimestamp: null

  labels:

    run: db

  name: db

spec:

  containers:

  - env:

    - name: MYSQL_ROOT_PASSWORD

      valueFrom:

        secretKeyRef:

          name: sec01

          key: dbpass

    - name: MYSQL_DATABASE

      valueFrom:

        secretKeyRef:

          name: sec02

          key: dbname

    image: registry.cn-hangzhou.aliyuncs.com/cloudcs/mysql:888

    imagePullPolicy: IfNotPresent

    name: db

    resources: {}

  dnsPolicy: ClusterFirst

  restartPolicy: Always

status: {}

[root@master ~]# kubectl apply -f db.yaml

pod/db created

[root@master ~]# kubectl get pod

NAME   READY   STATUS    RESTARTS   AGE

db     1/1     Running   0          2s

pod1   1/1     Running   0          107m

pod2   1/1     Running   0          107m

pod3   1/1     Running   0          107m

[root@master ~]# kubectl get pod -o wide

NAME   READY   STATUS    RESTARTS   AGE    IP              NODE    NOMINATED NODE   READINESS GATES

db     1/1     Running   0          8s     10.244.104.17   node2   <none>           <none>

pod1   1/1     Running   0          107m   10.244.104.25   node2   <none>           <none>

pod2   1/1     Running   0          107m   10.244.104.14   node2   <none>           <none>

pod3   1/1     Running   0          107m   10.244.104.18   node2   <none>           <none>

[root@master ~]# mysql -uroot -predhat -h 10.244.104.17

Welcome to the MariaDB monitor.  Commands end with ; or \g.

Your MySQL connection id is 8

Server version: 8.0.27 MySQL Community Server - GPL


Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.


Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.


MySQL [(none)]> show databases;

+--------------------+

| Database           |

+--------------------+

| information_schema |

| mysql              |

| performance_schema |

| sys                |

| wordpress          |

+--------------------+

5 rows in set (0.004 sec)

MySQL [(none)]>

configmap

特别适合封装参数文件、配置文件等。和 secret 不同的是,configmap 不做任何编码,内容直接明文显示(注意:secret 本身也只是 base64 编码,并非真正加密)。


[root@master ~]# kubectl create configmap --help |grep from
  kubectl create configmap my-config --from-file=path/to/bar
  kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt
  kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2
  # Create a new config map named my-config from the key=value pairs in the file
  kubectl create configmap my-config --from-file=path/to/bar
  # Create a new config map named my-config from an env file
  kubectl create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env
    --from-env-file=[]:
    --from-file=[]:
    --from-literal=[]:

举个例子

创建一个web的deployment控制器,副本数3个。

[root@master ~]# cat web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: swr.cn-north-4.myhuaweicloud.com/tianmeili/nginx:1.0
        name: nginx
        resources: {}
status: {}

[root@master ~]# kubectl apply -f web.yaml
deployment.apps/web created
[root@master ~]# kubectl get pod
NAME                   READY   STATUS    RESTARTS   AGE
db                     1/1     Running   0          6m37s
pod1                   1/1     Running   0          114m
pod2                   1/1     Running   0          114m
pod3                   1/1     Running   0          114m
web-5c7d8bcc5d-fjrjc   1/1     Running   0          3s
web-5c7d8bcc5d-gcvkq   1/1     Running   0          3s
web-5c7d8bcc5d-ml6bv   1/1     Running   0          3s

[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-fjrjc -- sh -c 'echo 1111 > /usr/share/nginx/html/index.html'
[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-gcvkq -- sh -c 'echo 1111 > /usr/share/nginx/html/index.html'
[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-ml6bv -- sh -c 'echo 1111 > /usr/share/nginx/html/index.html'
[root@master ~]#

未来不管通过svc负载均衡到哪个pod上,上层应用看到的内容都一样。

可是,现在有个需求,我要更新nginx的镜像了。

[root@master ~]# kubectl exec -ti web-5c7d8bcc5d-ml6bv -- sh -c 'nginx -v'
nginx version: nginx/1.19.6
现在要把 nginx 镜像更换为 registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888:
kubectl set image deploy web nginx=registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888
[root@master ~]# kubectl set image deploy web nginx=registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888
deployment.apps/web image updated
[root@master ~]# kubectl get pod
NAME                   READY   STATUS    RESTARTS   AGE
db                     1/1     Running   0          10m
pod1                   1/1     Running   0          118m
pod2                   1/1     Running   0          118m
pod3                   1/1     Running   0          118m
web-55685895c7-4vjvt   1/1     Running   0          5s
web-55685895c7-lb74n   1/1     Running   0          7s
web-55685895c7-lxpxz   1/1     Running   0          4s
[root@master ~]# kubectl exec -ti web-55685895c7-4vjvt -- sh -c 'nginx -v'
nginx version: nginx/1.21.5

可是问题来了,当你更新后,原来的网站内容还在吗?

怎么解决?通过封装configmap就可以解决

[root@master ~]# cat index.html

11111

[root@master ~]#

[root@master ~]# kubectl create configmap cm666 --from-file index.html

configmap/cm666 created

[root@master ~]# kubectl get cm

NAME               DATA   AGE

cm1                1      9m51s

cm666              1      3s

kube-root-ca.crt   1      122m

[root@master ~]# cat web.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  creationTimestamp: null

  labels:

    app: web

  name: web

spec:

  replicas: 3

  selector:

    matchLabels:

      app: web

  strategy: {}

  template:

    metadata:

      creationTimestamp: null

      labels:

        app: web

    spec:

      containers:

      - image: swr.cn-north-4.myhuaweicloud.com/tianmeili/nginx:1.0

        name: nginx

        volumeMounts:

        - name: foo

          mountPath: "/usr/share/nginx/html/"

          readOnly: true

        resources: {}

      volumes:

      - name: foo

        configMap:

          name: cm666

status: {}

[root@master ~]# kubectl apply -f web.yaml

[root@master ~]# kubectl get pod

NAME                   READY   STATUS    RESTARTS   AGE

db                     1/1     Running   0          18m

pod1                   1/1     Running   0          126m

pod2                   1/1     Running   0          125m

pod3                   1/1     Running   0          125m

web-5cff98694b-9gngr   1/1     Running   0          3s

web-5cff98694b-dp9fg   1/1     Running   0          3s

web-5cff98694b-l2254   1/1     Running   0          3s

[root@master ~]# kubectl exec -ti web-5cff98694b-9gngr -- sh -c 'cat /usr/share/nginx/html/index.html'

11111

[root@master ~]# kubectl exec -ti web-5cff98694b-9gngr -- sh -c 'nginx -v'

nginx version: nginx/1.19.6

[root@master ~]# kubectl set image deploy web nginx=registry.cn-hangzhou.aliyuncs.com/cloudcs/nginx:888

[root@master ~]# kubectl get pod
NAME                   READY   STATUS    RESTARTS   AGE
db                     1/1     Running   0          19m
pod1                   1/1     Running   0          127m
pod2                   1/1     Running   0          127m
pod3                   1/1     Running   0          127m
web-55f44cfd8c-hn59j   1/1     Running   0          6s
web-55f44cfd8c-l52zk   1/1     Running   0          3s
web-55f44cfd8c-m55gp   1/1     Running   0          5s
[root@master ~]# kubectl exec -ti web-55f44cfd8c-hn59j -- sh -c 'cat /usr/share/nginx/html/index.html'
11111