Dynamic volume provisioning is implemented with StorageClass, which lets storage volumes be created on demand. Without dynamic provisioning, a Kubernetes cluster administrator has to create new volumes by hand; with it, Kubernetes automatically creates the storage a user requests. Deploying a StorageClass first requires a storage provisioner, such as AWS EBS, AzureFile, AzureDisk, CephFS, Cinder, FC, iSCSI, NFS and so on. This lab uses an NFS provisioner.
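For reference, once the StorageClass from step 3 below exists, a user only has to submit a PVC that references it and the provisioner creates a matching PV automatically. A minimal sketch (the claim name my-claim is just an example):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  storageClassName: nfs-storageclass
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 1Gi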
1. Configure the NFS service
Deploy the NFS server on the management node (10.0.0.1); a dedicated machine can also be used as the NFS server. The shared data directory is /home/pvdata/share (permissions 777).
[root@k8s-node01 storageclass]# cat /etc/exports
/home/pvdata/share *(rw,sync,insecure,no_subtree_check,no_root_squash) # share used for dynamic PV provisioning
[root@k8s-node01 nginx]# exportfs -r # reload the NFS exports
[root@k8s-node01 nginx]# exportfs # list the NFS shared directories
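Optionally, verify from a worker node that the export is reachable (assuming the showmount client is installed there):
showmount -e 10.0.0.1 # should list /home/pvdata/share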
2. Create the nfs-provisioner
[root@k8s-node01 storageclass]# cat serviceaccount.yaml # ServiceAccount for the nfs-provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
[root@k8s-node01 storageclass]# kubectl apply -f serviceaccount.yaml # apply the manifest
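As a quick check, the new ServiceAccount should now show up:
kubectl get serviceaccount nfs-provisioner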
[root@k8s-node01 storageclass]# cat service-rbac.yaml # RBAC authorization for the ServiceAccount
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["nfs-provisioner"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-provisioner
subjects:
- kind: ServiceAccount
name: nfs-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: nfs-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
subjects:
- kind: ServiceAccount
name: nfs-provisioner
namespace: default
roleRef:
kind: Role
name: leader-locking-nfs-provisioner
apiGroup: rbac.authorization.k8s.io
[root@k8s-node01 storageclass]# kubectl apply -f service-rbac.yaml # apply the manifest
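An optional sanity check of the binding is to impersonate the ServiceAccount with kubectl auth can-i (it should answer yes):
kubectl auth can-i create persistentvolumes --as=system:serviceaccount:default:nfs-provisioner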
[root@k8s-node01 storageclass]# cat nfs-provisioner-deploy.yaml # Deployment that runs the nfs-provisioner pod
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccountName: nfs-provisioner
      containers:
      - name: nfs-provisioner
        image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: jim.com/nfs # customizable provisioner name
        - name: NFS_SERVER
          value: 10.0.0.1 # IP address of the NFS server
        - name: NFS_PATH
          value: /home/pvdata/share # shared directory configured on the NFS server
      volumes:
      - name: nfs-client-root
        nfs:
          server: 10.0.0.1 # IP address of the NFS server
          path: /home/pvdata/share # shared directory configured on the NFS server
[root@k8s-node01 storageclass]# kubectl apply -f nfs-provisioner-deploy.yaml # apply the manifest
[root@k8s-node01 storageclass]# kubectl get pod -l app=nfs-provisioner # check the resulting pod
NAME READY STATUS RESTARTS AGE
nfs-provisioner-6bbc9fcd47-79m8z 1/1 Running 0 80m
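If the pod does not reach Running, or if PVCs later stay Pending, the provisioner's logs are the first place to look:
kubectl logs -l app=nfs-provisioner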
3. Configure the StorageClass
[root@k8s-node01 storageclass]# cat storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-storageclass # customizable; the PVCs below reference this name
provisioner: jim.com/nfs # must match the PROVISIONER_NAME environment variable
[root@k8s-node01 storageclass]# kubectl apply -f storageclass.yaml # apply the manifest
[root@k8s-node01 storageclass]# kubectl get storageclasses # verify that the StorageClass was created
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storageclass jim.com/nfs Delete Immediate false 87m
At this point the NFS StorageClass is up and running.
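Optionally, the class can be marked as the cluster default, so that PVCs without an explicit class also use it; a sketch using the standard annotation:
kubectl patch storageclass nfs-storageclass -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'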
4. Deploy a stateful application (StatefulSet) for verification
[root@k8s-node01 storageclass]# cat statefulset-storage.yaml # an nginx web application (headless Service + StatefulSet)
apiVersion: v1
kind: Service
metadata:
  name: my-sc-web
  labels:
    app: sc-web
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: sc-web
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: my-sc-web
spec:
  selector:
    matchLabels:
      app: sc-web
  serviceName: "my-sc-web"
  replicas: 3
  template:
    metadata:
      labels:
        app: sc-web
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: "nfs-storageclass" # must match the StorageClass created above
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi
[root@k8s-node01 storageclass]# kubectl apply -f statefulset-storage.yaml # apply the manifest
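Note: volume.beta.kubernetes.io/storage-class is the legacy annotation; on current clusters the same request is usually written with spec.storageClassName in the volumeClaimTemplates, roughly:
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      storageClassName: nfs-storageclass
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi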
[root@k8s-node01 storageclass]# kubectl get pod -o wide -l app=sc-web # verify that the pods were created
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-sc-web-0 1/1 Running 0 8m32s 10.122.135.138 k8s-node03 <none> <none>
my-sc-web-1 1/1 Running 0 8m14s 10.122.58.218 k8s-node02 <none> <none>
my-sc-web-2 1/1 Running 0 7m21s 10.122.135.137 k8s-node03 <none> <none>
[root@k8s-node01 storageclass]# kubectl get pvc -l app=sc-web # verify that the PVCs were created
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-my-sc-web-0 Bound pvc-2739349e-648f-4c2a-9f33-74068d18b4c2 5Gi RWO nfs-storageclass 98m
www-my-sc-web-1 Bound pvc-aee9ca35-4a5c-4f1a-a90b-2226c31d604a 5Gi RWO nfs-storageclass 98m
www-my-sc-web-2 Bound pvc-507028c9-7659-42fe-8778-d7ef54ae2312 5Gi RWO nfs-storageclass 90m
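If a claim were to stay in Pending, kubectl describe shows the provisioning events, e.g.:
kubectl describe pvc www-my-sc-web-0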
[root@k8s-node01 storageclass]# kubectl get pv |grep nfs-storageclass # verify that the corresponding PVs were created
pvc-2739349e-648f-4c2a-9f33-74068d18b4c2 5Gi RWO Delete Bound default/www-my-sc-web-0 nfs-storageclass 100m
pvc-507028c9-7659-42fe-8778-d7ef54ae2312 5Gi RWO Delete Bound default/www-my-sc-web-2 nfs-storageclass 92m
pvc-aee9ca35-4a5c-4f1a-a90b-2226c31d604a 5Gi RWO Delete Bound default/www-my-sc-web-1 nfs-storageclass 100m
[root@k8s-node01 storageclass]# ll /home/pvdata/share/ # inspect the PV directories created under the NFS share
total 0
drwxrwxrwx 2 root root 24 Aug 19 01:52 default-www-my-sc-web-0-pvc-2739349e-648f-4c2a-9f33-74068d18b4c2
drwxrwxrwx 2 root root 24 Aug 19 01:53 default-www-my-sc-web-1-pvc-aee9ca35-4a5c-4f1a-a90b-2226c31d604a
drwxrwxrwx 2 root root 24 Aug 19 01:58 default-www-my-sc-web-2-pvc-507028c9-7659-42fe-8778-d7ef54ae2312
Create an index.html file for each of the three sites:
[root@k8s-node01 storageclass]# for i in 0 1 2; do kubectl exec my-sc-web-$i -- sh -c 'echo Welcome to your visit,this is $(hostname). $(date) > /usr/share/nginx/html/index.html'; done
[root@k8s-node01 storageclass]# kubectl get pod -o wide -l app=sc-web # list the pods
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-sc-web-0 1/1 Running 0 23m 10.122.135.138 k8s-node03 <none> <none>
my-sc-web-1 1/1 Running 0 23m 10.122.58.218 k8s-node02 <none> <none>
my-sc-web-2 1/1 Running 0 22m 10.122.135.137 k8s-node03 <none> <none>
[root@k8s-node01 storageclass]# curl 10.122.135.138 # response from my-sc-web-0
Welcome to your visit,this is my-sc-web-0. Wed Aug 19 07:39:31 UTC 2020
[root@k8s-node01 storageclass]# curl 10.122.58.218 # response from my-sc-web-1
Welcome to your visit,this is my-sc-web-1. Wed Aug 19 07:39:28 UTC 2020
[root@k8s-node01 storageclass]# curl 10.122.135.137 # response from my-sc-web-2
Welcome to your visit,this is my-sc-web-2. Wed Aug 19 07:39:31 UTC 2020
[root@k8s-node01 storageclass]#
Delete the three pods to verify that the data is persisted:
[root@k8s-node01 storageclass]# kubectl delete pod -l app=sc-web
pod "my-sc-web-0" deleted
pod "my-sc-web-1" deleted
pod "my-sc-web-2" deleted
After the pods are deleted, the StatefulSet controller automatically creates three new ones.
[root@k8s-node01 storageclass]# kubectl get pod -o wide -l app=sc-web # check the three newly created pods
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-sc-web-0 1/1 Running 0 84s 10.122.58.219 k8s-node02 <none> <none>
my-sc-web-1 1/1 Running 0 66s 10.122.135.136 k8s-node03 <none> <none>
my-sc-web-2 1/1 Running 0 48s 10.122.135.141 k8s-node03 <none> <none>
[root@k8s-node01 storageclass]# curl 10.122.58.219 # response from my-sc-web-0
Welcome to your visit,this is my-sc-web-0. Wed Aug 19 07:39:31 UTC 2020
[root@k8s-node01 storageclass]# curl 10.122.135.136 # response from my-sc-web-1
Welcome to your visit,this is my-sc-web-1. Wed Aug 19 07:39:28 UTC 2020
[root@k8s-node01 storageclass]# curl 10.122.135.141 # response from my-sc-web-2
Welcome to your visit,this is my-sc-web-2. Wed Aug 19 07:39:31 UTC 2020
Comparing the data before and after the deletion, the index.html files are unchanged: the data is persisted on the NFS server, so deleting the pods does not affect it.
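The same content can also be checked directly on the NFS server, since each PV maps to one directory under the share, e.g.:
cat /home/pvdata/share/default-www-my-sc-web-0-pvc-2739349e-648f-4c2a-9f33-74068d18b4c2/index.html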
5. Delete the StatefulSet application and reclaim the PVs
[root@k8s-node01 storageclass]# kubectl delete -f statefulset-storage.yaml # delete the nginx StatefulSet application
service "my-sc-web" deleted
statefulset.apps "my-sc-web" deleted
[root@k8s-node01 storageclass]# kubectl get pod -o wide -l app=sc-web # the application pods are gone
No resources found in default namespace.
[root@k8s-node01 storageclass]# kubectl get pvc -l app=sc-web # list the PVCs
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-my-sc-web-0 Bound pvc-2739349e-648f-4c2a-9f33-74068d18b4c2 5Gi RWO nfs-storageclass 125m
www-my-sc-web-1 Bound pvc-aee9ca35-4a5c-4f1a-a90b-2226c31d604a 5Gi RWO nfs-storageclass 124m
www-my-sc-web-2 Bound pvc-507028c9-7659-42fe-8778-d7ef54ae2312 5Gi RWO nfs-storageclass 116m
[root@k8s-node01 storageclass]# kubectl delete pvc -l app=sc-web # delete the PVCs
persistentvolumeclaim "www-my-sc-web-0" deleted
persistentvolumeclaim "www-my-sc-web-1" deleted
persistentvolumeclaim "www-my-sc-web-2" deleted
[root@k8s-node01 storageclass]# kubectl get pvc -l app=sc-web # the PVCs are gone
No resources found in default namespace.
[root@k8s-node01 storageclass]# ll /home/pvdata/share/ # after the PVCs are deleted and reclaimed, the PV directories on NFS are prefixed with archived-
total 0
drwxrwxrwx 2 root root 24 Aug 19 01:52 archived-default-www-my-sc-web-0-pvc-2739349e-648f-4c2a-9f33-74068d18b4c2
drwxrwxrwx 2 root root 24 Aug 19 01:53 archived-default-www-my-sc-web-1-pvc-aee9ca35-4a5c-4f1a-a90b-2226c31d604a
drwxrwxrwx 2 root root 24 Aug 19 01:58 archived-default-www-my-sc-web-2-pvc-507028c9-7659-42fe-8778-d7ef54ae2312
Notes:
- Automatically created PVs are stored in the shared data directory on the NFS server as directories named ${namespace}-${pvcName}-${pvName}.
- When such a PV is reclaimed, its directory remains on the NFS server renamed to archived-${namespace}-${pvcName}-${pvName}.
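Note: whether the reclaimed data is archived or removed outright is controlled by the nfs-client-provisioner's archiveOnDelete StorageClass parameter (support depends on the provisioner version). A sketch that deletes the backing directory instead of renaming it:
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-storageclass
provisioner: jim.com/nfs
parameters:
  archiveOnDelete: "false"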
End.