Disk requirements

A newly added disk must be a raw device with no partitions and no formatted filesystem, and it must not be an LVM volume.
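Before handing a disk to Rook, it is worth confirming on the node that it really is bare. A quick check, assuming the new disks are /dev/vdb and /dev/vdc as used in the cluster config further down (the wipe commands are only needed, and are destructive, if the disk carries leftover signatures):

# Run on each storage node
lsblk -f /dev/vdb /dev/vdc        # FSTYPE must be empty and there must be no partitions
# Only if old signatures or a partition table remain (this destroys all data on the disk):
# wipefs -a /dev/vdb
# sgdisk --zap-all /dev/vdb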

Adjust the device discovery scan interval

vim rook/deploy/examples/operator.yaml
# The duration between discovering devices in the rook-discover daemonset.
- name: ROOK_DISCOVER_DEVICES_INTERVAL
  value: "60s" # duration string; the upstream default is "60m"

Apply the resource

kubectl apply -f rook/deploy/examples/operator.yaml
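To confirm the new interval reached the operator, the environment variable can be read back from the Deployment. A minimal check, assuming ROOK_DISCOVER_DEVICES_INTERVAL is set on the rook-ceph-operator Deployment as above (some Rook versions configure it via the rook-ceph-operator-config ConfigMap instead, and the discovery DaemonSet itself only runs when ROOK_ENABLE_DISCOVERY_DAEMON is enabled):

kubectl -n rook-ceph get deployment rook-ceph-operator \
  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="ROOK_DISCOVER_DEVICES_INTERVAL")].value}'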

Add OSD disks

vim rook/deploy/examples/cluster.yaml
nodes:
#   - name: "172.17.4.201"
#     devices: # specific devices to use for storage can be specified for each node
#       - name: "sdb"
#       - name: "nvme01" # multiple osds can be created on high performance devices
#         config:
#           osdsPerDevice: "5"
#       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
#     config: # configuration can be specified at the node level which overrides the cluster level config
#   - name: "172.17.4.301"
#     deviceFilter: "^sd."
# when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
- name: "master"
  devices:
  - name: "vdb"
  - name: "vdc"
- name: "node"
  devices:
  - name: "vdb"
  - name: "vdc"
- name: "node2"
  devices:
  - name: "vdb"
  - name: "vdc"
onlyApplyOSDPlacement: false

Apply the resource

kubectl apply -f rook/deploy/examples/cluster.yaml
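Once the updated CephCluster is applied, the operator schedules an osd-prepare job on each listed node before the new OSD pods appear. These commands watch that progress, assuming the standard Rook labels:

# Watch the prepare jobs run to completion on master, node and node2
kubectl -n rook-ceph get pod -l app=rook-ceph-osd-prepare -w

# Afterwards the new OSD deployments should exist
kubectl -n rook-ceph get deployment -l app=rook-ceph-osd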

Check

# osd-3 through osd-5 are newly added
[root@master ~]# kubectl get pod -n rook-ceph
NAME                                              READY   STATUS      RESTARTS   AGE
csi-cephfsplugin-7ftv5                            2/2     Running     0          170m
csi-cephfsplugin-kfwrn                            2/2     Running     0          170m
csi-cephfsplugin-nrv6r                            2/2     Running     0          170m
csi-cephfsplugin-provisioner-6875cdf85f-nv4z7     5/5     Running     0          170m
csi-cephfsplugin-provisioner-6875cdf85f-zm8zf     5/5     Running     0          170m
csi-rbdplugin-provisioner-cc86dcdc8-g8985         5/5     Running     0          170m
csi-rbdplugin-provisioner-cc86dcdc8-s78h8         5/5     Running     0          170m
csi-rbdplugin-q2xst                               2/2     Running     0          170m
csi-rbdplugin-qfckq                               2/2     Running     0          170m
csi-rbdplugin-wd9tf                               2/2     Running     0          170m
rook-ceph-crashcollector-master-dffb5cb69-v2b9h   1/1     Running     0          117m
rook-ceph-crashcollector-node-747965c88c-bddk6    1/1     Running     0          117m
rook-ceph-crashcollector-node2-5446865889-p8xc7   1/1     Running     0          168m
rook-ceph-mds-myfs-a-7d8ff84777-qtwn4             2/2     Running     0          117m
rook-ceph-mds-myfs-b-748d64bdb6-z4tn4             2/2     Running     0          117m
rook-ceph-mgr-a-6786cf7454-ztx9h                  3/3     Running     0          168m
rook-ceph-mgr-b-84459794f9-ld6tj                  3/3     Running     0          168m
rook-ceph-mon-a-656c57bbf5-mj5jn                  2/2     Running     0          170m
rook-ceph-mon-b-7b5955d99d-wcqz9                  2/2     Running     0          169m
rook-ceph-mon-c-78749f8647-jw8n9                  2/2     Running     0          168m
rook-ceph-operator-99ffdc499-2cjs2                1/1     Running     0          171m
rook-ceph-osd-0-b86dff787-zjgbd                   2/2     Running     0          166m
rook-ceph-osd-1-6899548f48-hl2ct                  2/2     Running     0          166m
rook-ceph-osd-2-cbf6db88c-jv9z4                   2/2     Running     0          166m
rook-ceph-osd-3-f7c759774-qnttt                   2/2     Running     0          162m
rook-ceph-osd-4-556d4cb6b5-jv2cf                  2/2     Running     0          162m
rook-ceph-osd-5-5d8b5bd559-57r4h                  2/2     Running     0          162m
rook-ceph-osd-prepare-master-cfm4b                0/1     Completed   0          163m
rook-ceph-osd-prepare-node-xm6kv                  0/1     Completed   0          163m
rook-ceph-osd-prepare-node2-nlbzj                 0/1     Completed   0          162m
rook-ceph-tools-68754fc9dd-568gk                  1/1     Running     0          170m
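The toolbox pod shown in the listing can confirm that the new OSDs joined the CRUSH map and that the cluster settles back to HEALTH_OK. A quick check, assuming the toolbox Deployment is named rook-ceph-tools as in the upstream example:

kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd tree
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status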

If the resource file is not at hand, the resource can be modified directly with kubectl edit

kubectl edit cephcluster -n rook-ceph
  storage:
    nodes:
    - devices:
      - name: vdb
      - name: vdc
      name: master
    - devices:
      - name: vdb
      - name: vdc
      name: node
    - devices:
      - name: vdb
      - name: vdc
      name: node2