Table of Contents

1.1 Rook Architecture Overview

1.2 Rook Deployment

1.3 Using Block Storage

1.4 Using Shared Filesystem Storage

1.5 Online PVC Expansion and Snapshots

1.5.1 Online PVC Expansion

1.5.2 Creating a PVC Snapshot

1.6 Tearing Down the Rook Cluster


1.1 Rook Architecture Overview

[Figures: Rook/Ceph architecture diagrams]

1.2 Rook Deployment

Official docs: https://rook.io/docs/rook/v1.9/quickstart.html

#Taint the storage nodes: 202, 203, 204
#Run on 201
kubectl taint node k8s-192-168-44-202.host.com role=storage-node:NoSchedule
kubectl taint node k8s-192-168-44-203.host.com role=storage-node:NoSchedule
kubectl taint node k8s-192-168-44-204.host.com role=storage-node:NoSchedule
#Verify (run on 201)
for i in $(kubectl get nodes|awk 'NR>1{print $1}');\
do \
kubectl describe nodes ${i} |egrep -i "taint";\
echo "===================================================";\
echo "${i}";\
echo "===================================================";\
echo;\
done
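
#(Optional) A more compact way to list the taints; a sketch using jsonpath, assuming a reasonably recent kubectl:
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}'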

#Label the storage nodes: 202, 203, 204
#Run on 201
kubectl label nodes k8s-192-168-44-202.host.com role=storage-node
kubectl label nodes k8s-192-168-44-203.host.com role=storage-node
kubectl label nodes k8s-192-168-44-204.host.com role=storage-node
#Verify (run on 201)
for i in $(kubectl get nodes|awk 'NR>1{print $1}');\
do \
kubectl describe nodes ${i} |egrep -i -B 100 "Annotations";\
echo "===================================================";\
echo "${i}";\
echo "===================================================";\
echo;\
done
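
#(Optional) The role label can also be shown as a column; a sketch:
kubectl get nodes -L role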

#Deploy the operator (run on 201)
cd ~
git clone --single-branch --branch v1.9.2 https://github.com/rook/rook.git 
cd ~/rook/deploy/examples/ 
#Replace the images hosted on foreign registries; see [Ops Notes - kubeadm-k8s - Chapter 7] (run on 201)
egrep -i "(# ROOK_CSI(.*)_IMAGE|image:)" operator.yaml
=============================================================
  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.6.1"
  # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0"
  # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.4.0"
  # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0"
  # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1"
  # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0"
  # ROOK_CSI_NFS_IMAGE: "k8s.gcr.io/sig-storage/nfsplugin:v3.1.0"
  # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
  # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
          image: rook/ceph:v1.9.2
=============================================================
#Replace the images and uncomment the lines (run on 201)
vim operator.yaml
egrep -i "(# ROOK_CSI(.*)_IMAGE|image:)" operator.yaml
=============================================================
  ROOK_CSI_CEPH_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/cephcsi:v3.6.1"
  ROOK_CSI_REGISTRAR_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/csi-node-driver-registrar:v2.5.0"
  ROOK_CSI_RESIZER_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/csi-resizer:v1.4.0"
  ROOK_CSI_PROVISIONER_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/csi-provisioner:v3.1.0"
  ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/csi-snapshotter:v5.0.1"
  ROOK_CSI_ATTACHER_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/csi-attacher:v3.4.0"
  ROOK_CSI_NFS_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/nfsplugin:v3.1.0"
  CSI_VOLUME_REPLICATION_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/volumereplication-operator:v0.3.0"
  ROOK_CSIADDONS_IMAGE: "registry.cn-hangzhou.aliyuncs.com/qiansong/k8s-sidecar:v0.2.1"
          image: rook/ceph:v1.9.2
=============================================================

#Pre-pull the replaced images (optional)
for IMAGE in $(egrep "ROOK_CSI.*_IMAGE" operator.yaml |awk -F '"' '{print $2}');\
do \
docker image pull $IMAGE;\
done
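
#(Optional) On containerd-based nodes docker is not available; each image can be pre-pulled with crictl instead (a sketch, run on the storage node itself, assuming crictl is configured for the node's runtime), e.g.:
crictl pull registry.cn-hangzhou.aliyuncs.com/qiansong/cephcsi:v3.6.1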

#Add node affinity and taint tolerations (run on 201)
#Reference: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/operator.yaml
vim operator.yaml
==========================================================
  # (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
  CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node"
  # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  CSI_PROVISIONER_TOLERATIONS: |
    - effect: NoSchedule
      key: role
      value: 'storage-node'
      operator: Equal
  # (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
  CSI_PLUGIN_NODE_AFFINITY: "kubernetes.io/os=linux"
  # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  CSI_PLUGIN_TOLERATIONS: |
    - effect: NoSchedule
      key: role
      value: 'storage-node'
      operator: Equal
============================================================
      # Uncomment it to run rook operator on the host network
      #hostNetwork: true
      #Tolerations
      tolerations:
        - key: role #must match the key of the taint to tolerate
          operator: Equal #if operator is Exists, no value may be set; if operator is Equal, value must match the taint's value
          value: 'storage-node' #must match the value of the taint to tolerate
          effect: NoSchedule #must match the effect of the taint to tolerate
      #Node affinity
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: role
                    operator: In #the node must carry label role=storage-node
                    values:
                      - storage-node
===========================================================

#Apply the resources (run on 201)
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml 
kubectl -n rook-ceph get pod 
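
#(Optional) Wait for the operator to become Ready before moving on; a sketch, assuming the default app=rook-ceph-operator pod label:
kubectl -n rook-ceph wait pod -l app=rook-ceph-operator --for=condition=Ready --timeout=300s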

#Add disks on 202, 203, 204
#Run in batch from 201
for i in 192.168.44.{202..204};\
do \
ssh root@${i} sudo yum install -y lvm2;\
ssh root@${i} sudo lsblk;\
echo "===================================================";\
echo "${i}";\
echo "===================================================";\
echo;\
done
====================================================
A raw (unpartitioned, unformatted) disk is required as the storage device.
====================================================
lsblk 
NAME               MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                 11:0    1  1.9G  0 rom  
nvme0n1            259:0    0  100G  0 disk 
├─nvme0n1p1        259:1    0  500M  0 part /boot
└─nvme0n1p2        259:2    0 99.5G  0 part 
  ├─almalinux-root 253:0    0 97.5G  0 lvm  /
  └─almalinux-swap 253:1    0    2G  0 lvm  
nvme0n2            259:3    0  100G  0 disk 
==================================================
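
#(Optional) Double-check that the target disk carries no filesystem or partition signatures before handing it to Rook; a sketch, run on each storage node:
lsblk -f /dev/nvme0n2
wipefs -n /dev/nvme0n2   #-n reports signatures only, nothing is erased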

#Edit the cluster configuration file (run on 201)
#Docs: https://rook.io/docs/rook/v1.9/ceph-cluster-crd.html
#Reference config: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/cluster.yaml
egrep -i "(IMAGE|image:)" cluster.yaml
========================================================= 
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    image: quay.io/ceph/ceph:v16.2.7
=========================================================
#Edit and replace the image; see [Ops Notes - kubeadm-k8s - Chapter 7] (run on 201)
vim cluster.yaml 
egrep -i "(IMAGE|image:)" cluster.yaml 
==========================================================
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    image: registry.cn-hangzhou.aliyuncs.com/qiansong/ceph:v16.2.7
==========================================================

#Edit the cluster configuration (run on 201)
#Docs: https://rook.io/docs/rook/v1.9/ceph-cluster-crd.html
#Reference config: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/cluster.yaml
vim cluster.yaml
===========================================================
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: role
              operator: In
              values:
              - storage-node
      podAffinity:
      podAntiAffinity:
      topologySpreadConstraints:
      tolerations:
      - key: role
        value: 'storage-node'
        operator: Equal
        effect: NoSchedule
======================================================
  storage: # cluster level storage configuration and selection
    useAllNodes: false
    useAllDevices: false
    #deviceFilter:
    config:
      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
      # journalSizeMB: "1024"  # uncomment if the disks are 20 GB or smaller
      # osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    nodes:
      - name: "k8s-192-168-44-202.host.com"
        devices: # specific devices to use for storage can be specified for each node
          - name: "nvme0n2" # multiple osds can be created on high performance devices
            config:
              databaseSizeMB: "1024"
              journalSizeMB: "1024"
      - name: "k8s-192-168-44-203.host.com"
        devices: # specific devices to use for storage can be specified for each node
          - name: "nvme0n2" # multiple osds can be created on high performance devices
            config:
              databaseSizeMB: "1024"
              journalSizeMB: "1024"
      - name: "k8s-192-168-44-204.host.com"
        devices: # specific devices to use for storage can be specified for each node
          - name: "nvme0n2" # multiple osds can be created on high performance devices
            config:
              databaseSizeMB: "1024"
              journalSizeMB: "1024"
==============================================================

#Deploy the Ceph cluster (run on 201)
==============================================================
cluster.yaml: common settings for a production storage cluster. Requires at least three k8s worker nodes.
cluster-test.yaml: settings for a test cluster with no redundancy. Requires only a single node.
cluster-on-pvc.yaml: common settings for backing the Ceph mons and OSDs with PVs. Useful when running in a cloud environment or when local PVs have been created for Ceph.
cluster-external: connect to an external Ceph cluster with minimal access rights, to monitor its health and consume its storage.
cluster-external-management: connect to an external Ceph cluster with the admin key, to enable remote creation of pools and configuration of services such as object stores and shared filesystems.
=============================================================
kubectl apply -f cluster.yaml
kubectl -n rook-ceph get pod -owide
#The following pods should be running once the deployment has succeeded
=============================================================
NAME                                                              READY   STATUS      RESTARTS   AGE     IP               NODE                          NOMINATED NODE   READINESS GATES
csi-cephfsplugin-gdkln                                            3/3     Running     0          4m48s   192.168.44.203   k8s-192-168-44-203.host.com   <none>           <none>
csi-cephfsplugin-mx6j2                                            3/3     Running     0          4m52s   192.168.44.204   k8s-192-168-44-204.host.com   <none>           <none>
csi-cephfsplugin-provisioner-574495c75b-f8nxt                     6/6     Running     0          96m     10.244.172.196   k8s-192-168-44-203.host.com   <none>           <none>
csi-cephfsplugin-provisioner-574495c75b-wd5db                     6/6     Running     0          96m     10.244.238.194   k8s-192-168-44-204.host.com   <none>           <none>
csi-cephfsplugin-vmn7k                                            3/3     Running     0          9m33s   192.168.44.201   k8s-192-168-44-201.host.com   <none>           <none>
csi-cephfsplugin-vzcbt                                            3/3     Running     0          4m44s   192.168.44.202   k8s-192-168-44-202.host.com   <none>           <none>
csi-rbdplugin-54kq5                                               3/3     Running     0          9m33s   192.168.44.201   k8s-192-168-44-201.host.com   <none>           <none>
csi-rbdplugin-6xmjq                                               3/3     Running     0          4m50s   192.168.44.203   k8s-192-168-44-203.host.com   <none>           <none>
csi-rbdplugin-drxtc                                               3/3     Running     0          4m46s   192.168.44.202   k8s-192-168-44-202.host.com   <none>           <none>
csi-rbdplugin-jgvd7                                               3/3     Running     0          4m53s   192.168.44.204   k8s-192-168-44-204.host.com   <none>           <none>
csi-rbdplugin-provisioner-79c9fcd8cb-9w8p6                        6/6     Running     0          96m     10.244.172.195   k8s-192-168-44-203.host.com   <none>           <none>
csi-rbdplugin-provisioner-79c9fcd8cb-ztkvd                        6/6     Running     0          96m     10.244.76.3      k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-crashcollector-k8s-192-168-44-202.host.com-79d8bkf4w4   1/1     Running     0          71m     10.244.76.9      k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-crashcollector-k8s-192-168-44-203.host.com-5fbffg9ccs   1/1     Running     0          71m     10.244.172.199   k8s-192-168-44-203.host.com   <none>           <none>
rook-ceph-crashcollector-k8s-192-168-44-204.host.com-5995cs54qw   1/1     Running     0          71m     10.244.238.203   k8s-192-168-44-204.host.com   <none>           <none>
rook-ceph-mgr-a-75cc789ccf-69bfn                                  2/2     Running     0          71m     10.244.238.199   k8s-192-168-44-204.host.com   <none>           <none>
rook-ceph-mgr-b-588db8cb7f-xbr7c                                  2/2     Running     0          71m     10.244.76.6      k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-mon-a-659fc96b47-jmtnf                                  1/1     Running     0          93m     10.244.76.5      k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-mon-b-5584ccd58c-plgpw                                  1/1     Running     0          73m     10.244.238.198   k8s-192-168-44-204.host.com   <none>           <none>
rook-ceph-mon-c-5b545789cd-rmvmh                                  1/1     Running     0          72m     10.244.172.198   k8s-192-168-44-203.host.com   <none>           <none>
rook-ceph-operator-6b87cd9d48-xpfg8                               1/1     Running     0          4h32m   10.244.76.2      k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-osd-0-86db846df7-ndk6x                                  1/1     Running     0          71m     10.244.172.201   k8s-192-168-44-203.host.com   <none>           <none>
rook-ceph-osd-1-7d786c4c75-hmptk                                  1/1     Running     0          71m     10.244.238.202   k8s-192-168-44-204.host.com   <none>           <none>
rook-ceph-osd-2-688786c57d-jwfq8                                  1/1     Running     0          71m     10.244.76.10     k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-osd-prepare-k8s-192-168-44-202.host.com-jsv94           0/1     Completed   0          70m     10.244.76.11     k8s-192-168-44-202.host.com   <none>           <none>
rook-ceph-osd-prepare-k8s-192-168-44-203.host.com-2r6qq           0/1     Completed   0          70m     10.244.172.203   k8s-192-168-44-203.host.com   <none>           <none>
rook-ceph-osd-prepare-k8s-192-168-44-204.host.com-hdfbx           0/1     Completed   0          70m     10.244.238.204   k8s-192-168-44-204.host.com   <none>           <none>
rook-ceph-tools-96c6b54d6-27mhq                                   1/1     Running     0          58m     10.244.238.205   k8s-192-168-44-204.host.com   <none>           <none>
==========================================================

#Verify the cluster (run on 201)
vim toolbox.yaml
==================================================
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: role
                operator: In #the node must carry label role=storage-node
                values:
                - storage-node
      tolerations:
        - key: role
          value: 'storage-node'
          operator: Equal
          effect: NoSchedule
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
==================================================
kubectl apply -f toolbox.yaml
kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" 
kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -- bash
================================================
$ ceph -s
  cluster:
    id:     4cf329c1-ab1a-4ad7-8838-c4d18ecf9a0b
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum b,a,c (age 14m)
    mgr: b(active, since 13m), standbys: a
    osd: 3 osds: 3 up (since 13m), 3 in (since 13m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   14 MiB used, 60 GiB / 60 GiB avail
    pgs:     1 active+clean
 
$ ceph df
--- RAW STORAGE ---
CLASS    SIZE   AVAIL    USED  RAW USED  %RAW USED
nvme   60 GiB  60 GiB  14 MiB    14 MiB       0.02
TOTAL  60 GiB  60 GiB  14 MiB    14 MiB       0.02
 
--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS  USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0   0 B      0     19 GiB
================================================
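
#(Optional) A few more health checks from inside the toolbox pod; a sketch:
ceph osd tree
ceph osd status
ceph health detail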

#Cleaning up if the cluster deployment goes wrong (run on 201)
Docs: https://rook.io/docs/rook/v1.9/ceph-teardown.html
#Delete any previously created consumer resources (if present) (run on 201)
cd ~/rook/deploy/examples
kubectl delete -f ../wordpress.yaml 
kubectl delete -f ../ufile.yaml 
kubectl delete -n rook-ceph cephblockpool replicapool
kubectl delete -f csi/rbd/storageclass.yaml 
kubectl delete -f csi/cephfs/kube-registry.yaml 
kubectl delete -f csi/cephfs/storageclass.yaml
kubectl delete -f filesystem.yaml
#Delete the Rook cluster (run on 201)
kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}' 
kubectl -n rook-ceph delete cephcluster rook-ceph 
#Confirm the CephCluster CR has been deleted before proceeding to the next step (run on 201)
kubectl -n rook-ceph get cephcluster 
#Delete the operator and related resources (run on 201)
kubectl delete -f operator.yaml 
kubectl delete -f common.yaml
kubectl delete -f crds.yaml
#Force-delete any stuck resources (run on 201)
kubectl -n rook-ceph delete pod rook-ceph-csi-detect-version-p9k6p --force --grace-period=0

#Wipe the data on the hosts: 202, 203, 204
yum install gdisk -y
rm -rf /var/lib/rook
DISK="/dev/nvme0n2"
sgdisk --zap-all $DISK
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync 
blkdiscard $DISK
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove % 
rm -rf /dev/ceph-* 
rm -rf /dev/mapper/ceph--*
partprobe $DISK

1.3 Using Block Storage

Official docs: https://rook.io/docs/rook/v1.9/ceph-block.html

Note: Block storage allows a single pod to mount the storage. This guide shows how to create a simple multi-tier web application on Kubernetes using persistent volumes provisioned by Rook.

#Create the block StorageClass (run on 201)
#Reference: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/csi/rbd/storageclass.yaml
cd ~/rook/deploy/examples
=========================================
cat csi/rbd/storageclass.yaml 
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph # namespace:cluster
spec:
  failureDomain: host
  replicated:
    size: 3
    # Disallow setting pool with replica 1, this could lead to data loss without recovery.
    # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
    requireSafeReplicaSize: true
    # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
    # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
    #targetSizeRatio: .5
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph # namespace:cluster

  # If you want to use erasure coded pool with RBD, you need to create
  # two pools. one erasure coded and one replicated.
  # You need to specify the replicated pool here in the `pool` parameter, it is
  # used for the metadata of the images.
  # The erasure coded pool must be set as the `dataPool` parameter below.
  #dataPool: ec-data-pool
  pool: replicapool

  # (optional) mapOptions is a comma-separated list of map options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # mapOptions: lock_on_read,queue_depth=1024

  # (optional) unmapOptions is a comma-separated list of unmap options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # unmapOptions: force

   # (optional) Set it to true to encrypt each volume with encryption keys
   # from a key management system (KMS)
   # encrypted: "true"

   # (optional) Use external key management system (KMS) for encryption key by
   # specifying a unique ID matching a KMS ConfigMap. The ID is only used for
   # correlation to configmap entry.
   # encryptionKMSID: <kms-config-id>

  # RBD image format. Defaults to "2".
  imageFormat: "2"

  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
  imageFeatures: layering

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
  # Specify the filesystem type of the volume. If not specified, csi-provisioner
  # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
  # in hyperconverged settings where the volume is mounted on the same node as the osds.
  csi.storage.k8s.io/fstype: ext4
# uncomment the following to use rbd-nbd as mounter on supported nodes
# **IMPORTANT**: CephCSI v3.4.0 onwards a volume healer functionality is added to reattach
# the PVC to application pod if nodeplugin pod restart.
# Its still in Alpha support. Therefore, this option is not recommended for production use.
#mounter: rbd-nbd
allowVolumeExpansion: true
reclaimPolicy: Delete
=======================================================
#Apply the resource (run on 201)
kubectl apply -f csi/rbd/storageclass.yaml
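
#(Optional) Confirm that the pool and StorageClass were created; a sketch:
kubectl -n rook-ceph get cephblockpool replicapool
kubectl get storageclass rook-ceph-block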

#Create a test application (run on 201)
mkdir /data/k8s-yaml/mynginx -p
cd /data/k8s-yaml/mynginx/
==============================================================
cat pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: nginx
  name: mynginx
  labels: {}
spec:
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
  accessModes:
    - ReadWriteOnce
==================================================================
cat deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: nginx
  labels:
    app: mynginx
  name: mynginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mynginx
  template:
    metadata:
      labels:
        app: mynginx
      annotations:
        logging.kubesphere.io/logsidecar-config: '{}'
    spec:
      nodeSelector:
        kubernetes.io/hostname: k8s-192-168-44-201.host.com      
      containers:
        - name: mynginx
          imagePullPolicy: IfNotPresent
          image: 'nginx:1.20.2'
          ports:
            - name: http-80
              protocol: TCP
              containerPort: 80
          volumeMounts:
            - name: host-time
              mountPath: /etc/localtime
              readOnly: true
            - name: volume-8a0xrz
              readOnly: false
              mountPath: /usr/share/nginx/html
      serviceAccount: default
      initContainers: []
      volumes:
        - hostPath:
            path: /etc/localtime
            type: ''
          name: host-time
        - name: volume-8a0xrz
          persistentVolumeClaim:
            claimName: mynginx
      imagePullSecrets: null
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
===================================================================
cat svc.yaml 
apiVersion: v1
kind: Service
metadata:
  namespace: nginx
  labels:
    app: mynginx-svc
  name: mynginx-svc
spec:
  sessionAffinity: None
  selector:
    app: mynginx
  ports:
    - name: http-80
      protocol: TCP
      targetPort: 80
      port: 80
  type: NodePort
=========================================================

#Apply the resources (run on 201)
kubectl apply -f pvc.yaml
kubectl apply -f deploy.yaml
kubectl apply -f svc.yaml
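
#(Optional) Confirm the PVC is Bound before testing the application; a sketch:
kubectl -n nginx get pvc mynginx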

#Check the resources (run on 201)
kubectl -n nginx get all 
NAME                           READY   STATUS    RESTARTS   AGE
pod/mynginx-665859d754-2lxxv   1/1     Running   0          34m

NAME                  TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/mynginx-svc   NodePort   10.0.4.41    <none>        80:30764/TCP   34m

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/mynginx   1/1     1            1           34m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/mynginx-665859d754   1         1         1       34m

#Access test (run on 201)
kubectl -n nginx exec -it mynginx-665859d754-2lxxv -- sh -c 'echo "ahahaha" > /usr/share/nginx/html/index.html'
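
#(Optional) The same check from the command line; the NodePort 30764 is taken from the service output above:
curl http://192.168.44.201:30764/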

#Delete the test resources (run on 201)
kubectl delete -f svc.yaml 
kubectl delete -f deploy.yaml 
kubectl delete -f pvc.yaml

Browser access: http://192.168.44.201:30764/

[Screenshot: browser access result]

1.4 Using Shared Filesystem Storage

Official docs: https://rook.io/docs/rook/v1.9/ceph-filesystem.html

Note: A shared filesystem can be mounted with read/write permission from multiple pods. This can be useful for applications that are clustered using a shared filesystem.

#Create the filesystem (run on 201)
cd ~/rook/deploy/examples
#Edit the affinity and tolerations so the MDS pods land on the storage nodes
#Reference: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/filesystem.yaml
=============================================================
cat filesystem.yaml 
#################################################################################################################
# Create a filesystem with settings with replication enabled for a production environment.
# A minimum of 3 OSDs on different nodes are required in this example.
#  kubectl create -f filesystem.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph # namespace:cluster
spec:
  # The metadata pool spec. Must use replication.
  metadataPool:
    replicated:
      size: 3
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the data pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode:
        none
        # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
  # The list of data pool specs. Can use replication or erasure coding.
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
        # Disallow setting pool with replica 1, this could lead to data loss without recovery.
        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
        requireSafeReplicaSize: true
      parameters:
        # Inline compression mode for the data pool
        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
        compression_mode:
          none
          # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
        #target_size_ratio: ".5"
  # Whether to preserve filesystem after CephFilesystem CRD deletion
  preserveFilesystemOnDelete: true
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: role
              operator: In
              values:
              - storage-node
      topologySpreadConstraints:
      tolerations:
      - key: role
        operator: Equal
        value: 'storage-node'
        effect: NoSchedule
      podAffinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mds
            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
            topologyKey: kubernetes.io/hostname
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZ
              # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> in k8s cluster if your cluster is v1.16 or lower
              # Use <topologyKey: topology.kubernetes.io/zone>  in k8s cluster is v1.17 or upper
              topologyKey: topology.kubernetes.io/zone
    # A key/value list of annotations
    # annotations:
    #  key: value
    # A key/value list of labels
    # labels:
    #  key: value
    # resources:
    # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
    priorityClassName: system-cluster-critical
    livenessProbe:
      disabled: false
    startupProbe:
      disabled: false
  # Filesystem mirroring settings
  # mirroring:
    # enabled: true
    # list of Kubernetes Secrets containing the peer token
    # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
    # peers:
      #secretNames:
        #- secondary-cluster-peer
    # specify the schedule(s) on which snapshots should be taken
    # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
    # snapshotSchedules:
    #   - path: /
    #     interval: 24h # daily snapshots
    #     startTime: 11:55
    # manage retention policies
    # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
    # snapshotRetention:
    #   - path: /
    #     duration: "h 24"
=============================================================
#Apply the resource (run on 201)
kubectl apply -f filesystem.yaml
#Check the resources (run on 201)
kubectl -n rook-ceph get pod -l app=rook-ceph-mds
kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l app=rook-ceph-tools |awk 'NR>1{print $1}') -- ceph status
==========================================================
  cluster:
    id:     4cf329c1-ab1a-4ad7-8838-c4d18ecf9a0b
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum b,a,c (age 105m)
    mgr: b(active, since 104m), standbys: a
    mds: 1/1 daemons up, 1 hot standby
    osd: 3 osds: 3 up (since 104m), 3 in (since 104m)
 
  data:
    volumes: 1/1 healthy
    pools:   4 pools, 97 pgs
    objects: 27 objects, 2.3 KiB
    usage:   18 MiB used, 60 GiB / 60 GiB avail
    pgs:     97 active+clean
 
  io:
    client:   1.2 KiB/s rd, 2 op/s rd, 0 op/s wr
==========================================================
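
#(Optional) The filesystem itself can be inspected from the toolbox as well; a sketch:
kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l app=rook-ceph-tools |awk 'NR>1{print $1}') -- ceph fs status myfs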

#Create the StorageClass (run on 201)
#Reference: https://github.com/rook/rook/blob/v1.9.2/deploy/examples/csi/cephfs/storageclass.yaml
cd ~/rook/deploy/examples
=========================================================
cat csi/cephfs/storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph # namespace:cluster

  # CephFS filesystem name into which the volume shall be created
  fsName: myfs

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: myfs-replicated

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster

  # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
  # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
  # or by setting the default mounter explicitly via --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  # uncomment the following line for debugging
  #- debug
=============================================================
#Apply the resource (run on 201)
kubectl apply -f  csi/cephfs/storageclass.yaml
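
#(Optional) Confirm the StorageClass was created; a sketch:
kubectl get storageclass rook-cephfs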

#Create a test application (run on 201)
mkdir /data/k8s-yaml/nginx -p
cd /data/k8s-yaml/nginx/
===============================================================
cat pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: nginx
  name: nginx
  labels: {}
spec:
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs
  accessModes:
    - ReadWriteMany
============================================================
cat deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: nginx
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
      annotations:
        logging.kubesphere.io/logsidecar-config: '{}'
    spec:
      nodeSelector:
        kubernetes.io/hostname: k8s-192-168-44-201.host.com
      containers:
        - name: nginx
          imagePullPolicy: IfNotPresent
          image: 'nginx:1.20.2'
          ports:
            - name: http-80
              protocol: TCP
              containerPort: 80
          volumeMounts:
            - name: host-time
              mountPath: /etc/localtime
              readOnly: true
            - name: volume-4cnwwz
              readOnly: false
              mountPath: /usr/share/nginx/html
      serviceAccount: default
      initContainers: []
      volumes:
        - hostPath:
            path: /etc/localtime
            type: ''
          name: host-time
        - name: volume-4cnwwz
          persistentVolumeClaim:
            claimName: nginx
      imagePullSecrets: null
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
=========================================================
cat svc.yaml 
apiVersion: v1
kind: Service
metadata:
  namespace: nginx
  labels:
    app: nginx-svc
  name: nginx-svc
spec:
  sessionAffinity: None
  selector:
    app: nginx
  ports:
    - name: http-80
      protocol: TCP
      targetPort: 80
      port: 80
  type: NodePort
============================================================

#Apply the resources (run on 201)
kubectl apply -f pvc.yaml 
kubectl apply -f deploy.yaml 
kubectl apply -f svc.yaml

#Check the resources (run on 201)
kubectl -n nginx get all
NAME                        READY   STATUS    RESTARTS   AGE
pod/nginx-59864b6cb-fzgz8   1/1     Running   0          2m21s

NAME                TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/nginx-svc   NodePort   10.0.120.125   <none>        80:30734/TCP   2m21s

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx   1/1     1            1           2m21s

NAME                              DESIRED   CURRENT   READY   AGE
replicaset.apps/nginx-59864b6cb   1         1         1       2m21s

#Service test (run on 201)
kubectl -n nginx exec -it $(kubectl -n nginx get pod -l app=nginx |awk 'NR>1{print $1}') -- sh -c 'echo "hahahaha" > /usr/share/nginx/html/index.html'

#Scale out the pod replicas to check multi-pod read/write (run on 201)
kubectl -n nginx scale deployment nginx --replicas=3

#Check (run on 201)
kubectl -n nginx get pod -owide
NAME                    READY   STATUS    RESTARTS   AGE     IP             NODE                          NOMINATED NODE   READINESS GATES
nginx-59864b6cb-9l455   1/1     Running   0          40s     10.244.155.9   k8s-192-168-44-201.host.com   <none>           <none>
nginx-59864b6cb-dgx6r   1/1     Running   0          40s     10.244.155.8   k8s-192-168-44-201.host.com   <none>           <none>
nginx-59864b6cb-fzgz8   1/1     Running   0          5m15s   10.244.155.7   k8s-192-168-44-201.host.com   <none>           <none>
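
#(Optional) Since every replica mounts the same CephFS-backed PVC, each pod should serve identical content; a quick sketch:
for p in $(kubectl -n nginx get pod -l app=nginx -o name);\
do \
kubectl -n nginx exec ${p} -- cat /usr/share/nginx/html/index.html;\
done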

#Delete the test resources (run on 201)
kubectl delete -f svc.yaml 
kubectl delete -f deploy.yaml 
kubectl delete -f pvc.yaml

Browser access: http://192.168.44.201:30734/

[Screenshot: browser access result]

1.5 Online PVC Expansion and Snapshots

1.5.1 Online PVC Expansion

Official docs: https://rook.io/docs/rook/v1.3/ceph-csi-drivers.html

#Enable the feature gates on all components, on all master nodes: 201, 202, 203
======================================================
                  apiserver
=======================================================
vim /usr/lib/systemd/system/kube-apiserver.service
--feature-gates="RemoveSelfLink=false,EphemeralContainers=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true"
for IP in 192.168.44.{202,203};
do \
rsync -avzP /usr/lib/systemd/system/kube-apiserver.service root@${IP}:/usr/lib/systemd/system/;\
done
#Restart the service (run on 201)
systemctl daemon-reload
systemctl restart kube-apiserver.service
for IP in 192.168.44.{202,203};
do \
ssh root@${IP} systemctl daemon-reload;\
ssh root@${IP} systemctl restart kube-apiserver.service;\
done
=====================================================
                 kube-controller-manager
======================================================
vim /usr/lib/systemd/system/kube-controller-manager.service
--feature-gates="RemoveSelfLink=false,EphemeralContainers=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true" \
for IP in 192.168.44.{202,203};
do \
rsync -avzP /usr/lib/systemd/system/kube-controller-manager.service root@${IP}:/usr/lib/systemd/system/;\
done
#Restart the service (run on 201)
systemctl daemon-reload
systemctl restart kube-controller-manager.service
for IP in 192.168.44.{202,203};
do \
ssh root@${IP} systemctl daemon-reload;\
ssh root@${IP} systemctl restart kube-controller-manager.service;\
done
=====================================================
                 kube-scheduler
======================================================
vim /usr/lib/systemd/system/kube-scheduler.service
--feature-gates="RemoveSelfLink=false,EphemeralContainers=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true" \
for IP in 192.168.44.{202,203};
do \
rsync -avzP /usr/lib/systemd/system/kube-scheduler.service root@${IP}:/usr/lib/systemd/system/;\
done
#Restart the service (run on 201)
systemctl daemon-reload
systemctl restart kube-scheduler.service
for IP in 192.168.44.{202,203};
do \
ssh root@${IP} systemctl daemon-reload;\
ssh root@${IP} systemctl restart kube-scheduler.service;\
done

#Enable the feature gates on all components, on all worker nodes: 201, 202, 203
=====================================================
                 kube-proxy
======================================================
vim /usr/lib/systemd/system/kube-proxy.service
--feature-gates="RemoveSelfLink=false,EphemeralContainers=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true" \
for IP in 192.168.44.{202,203};
do \
rsync -avzP /usr/lib/systemd/system/kube-proxy.service root@${IP}:/usr/lib/systemd/system/;\
done
#Restart the service (run on 201)
systemctl daemon-reload
systemctl restart kube-proxy.service
for IP in 192.168.44.{202,203};
do \
ssh root@${IP} systemctl daemon-reload;\
ssh root@${IP} systemctl restart kube-proxy.service;\
done
=====================================================
                 kubelet
======================================================
vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --feature-gates=RemoveSelfLink=false,EphemeralContainers=true,VolumeSnapshotDataSource=true,ExpandCSIVolumes=true"
for IP in 192.168.44.{202,203};
do \
rsync -avzP /etc/systemd/system/kubelet.service.d/10-kubelet.conf root@${IP}:/etc/systemd/system/kubelet.service.d/;\
done
#Restart the service (run on 201)
systemctl daemon-reload
systemctl restart kubelet.service
for IP in 192.168.44.{202,203};
do \
ssh root@${IP} systemctl daemon-reload;\
ssh root@${IP} systemctl restart kubelet.service;\
done

#Expand the PVC (run on 201): increase spec.resources.requests.storage (here from 1Gi to 2Gi)
kubectl -n nginx edit pvc nginx
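
#(Optional) The same expansion can be done non-interactively by patching the storage request; a sketch (2Gi matches the df output below):
kubectl -n nginx patch pvc nginx -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'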

#Check the result (run on 201)
kubectl -n nginx exec -it $(kubectl -n nginx get pod -l app=nginx |awk 'NR>1{print $1}') -- sh -c "df -h"
Filesystem                                                                                                        Size  Used Avail Use% Mounted on
overlay                                                                                                            98G   13G   85G  13% /
tmpfs                                                                                                              64M     0   64M   0% /dev
tmpfs                                                                                                             7.9G     0  7.9G   0% /sys/fs/cgroup
/dev/mapper/almalinux-root                                                                                         98G   13G   85G  13% /usr/share/zoneinfo/Etc/UTC
shm                                                                                                                64M     0   64M   0% /dev/shm
10.0.127.195:6789:/volumes/csi/csi-vol-7a420384-a0f1-11ec-92b7-62cbc38c7ea2/38213224-22a5-48dc-831a-758853fa62b4  2.0G     0  2.0G   0% /usr/share/nginx/html
tmpfs                                                                                                             7.9G   12K  7.9G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                                                                                             7.9G     0  7.9G   0% /proc/acpi
tmpfs                                                                                                             7.9G     0  7.9G   0% /proc/scsi
tmpfs                                                                                                             7.9G     0  7.9G   0% /sys/firmware

1.5.2 Creating a PVC Snapshot

Official docs: https://rook.io/docs/rook/v1.3/ceph-csi-drivers.html
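
A minimal sketch of snapshotting the CephFS PVC created above, assuming the external snapshot controller and the VolumeSnapshot CRDs are already installed in the cluster; the object names are illustrative, while the driver and secret parameters follow the CephFS StorageClass defined earlier.
=============================================================
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-cephfsplugin-snapclass
driver: rook-ceph.cephfs.csi.ceph.com
parameters:
  clusterID: rook-ceph # namespace:cluster
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph # namespace:cluster
deletionPolicy: Delete
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  namespace: nginx
  name: nginx-snapshot # illustrative name
spec:
  volumeSnapshotClassName: csi-cephfsplugin-snapclass
  source:
    persistentVolumeClaimName: nginx
=============================================================
#Apply the manifest (saved e.g. as snapshot.yaml) and check the snapshot status
kubectl apply -f snapshot.yaml
kubectl -n nginx get volumesnapshot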

1.6 Tearing Down the Rook Cluster

#Cluster cleanup (run on 201)
Docs: https://rook.io/docs/rook/v1.9/ceph-teardown.html
#Delete the previously created consumer resources (if present) (run on 201)
cd /data/k8s-yaml/nginx/
kubectl delete -f svc.yaml
kubectl delete -f deploy.yaml 
kubectl delete -f pvc.yaml
cd ~/rook/deploy/examples
kubectl delete -f ../wordpress.yaml 
kubectl delete -f ../ufile.yaml 
kubectl delete -n rook-ceph cephblockpool replicapool
kubectl delete -f csi/rbd/storageclass.yaml 
kubectl delete -f csi/cephfs/kube-registry.yaml 
kubectl delete -f csi/cephfs/storageclass.yaml
kubectl delete -f filesystem.yaml
#Delete the Rook cluster (run on 201)
kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}' 
kubectl -n rook-ceph delete cephcluster rook-ceph 
#Confirm the CephCluster CR has been deleted before proceeding to the next step (run on 201)
kubectl -n rook-ceph get cephcluster 
#Delete the operator and related resources (run on 201)
kubectl delete -f operator.yaml 
kubectl delete -f common.yaml
kubectl delete -f crds.yaml
#Force-delete any stuck resources (run on 201)
kubectl -n rook-ceph delete pod rook-ceph-csi-detect-version-p9k6p --force --grace-period=0

#Wipe the data on the hosts: 201, 202, 203, 204
yum install gdisk -y
rm -rf /var/lib/rook
DISK="/dev/nvme0n2"
sgdisk --zap-all $DISK
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync 
blkdiscard $DISK
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove % 
rm -rf /dev/ceph-* 
rm -rf /dev/mapper/ceph--*
partprobe $DISK 

#Remove leftover sock files
rm -rf /var/lib/kubelet/plugins_registry/rook-ceph*