k8s使用ceph rbd存储
K爷 DevOps视角
部署ceph的服务器
在172.16.1.11、172.16.1.12、172.16.1.13上部署ceph存储
ceph集群操作
创建pool与image
$ ceph osd pool create kube 64 64
pool 'kube' created
$ ceph osd pool application enable kube rbd
enabled application 'rbd' on pool 'kube'
$ rbd pool init -p kube
$ rbd create kube/podimg01 --size 2G
$ rbd ls --pool kube -l
NAME SIZE PARENT FMT PROT LOCK
podimg01 2 GiB 2
$ rbd feature disable kube/podimg01 object-map fast-diff deep-flatten
$ rbd info kube/podimg01
rbd image 'podimg01':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
id: 5e776b8b4567
block_name_prefix: rbd_data.5e776b8b4567
format: 2
features: layering, exclusive-lock
op_features:
flags:
create_timestamp: Tue Sep 8 22:05:41 2020
创建拥有访问相关存储池权限的用户
创建用户,并将用户信息保存到文件中,同时复制到k8s集群的各节点
$ ceph auth get-or-create client.k8s mon 'allow r' osd 'allow * pool=kube'
[client.k8s]
key = AQCVlFdfoWBSIhAAXedA2yffXdWMHGgkppB03A==
$ ceph auth get client.k8s -o ceph.client.k8s.keyring
exported keyring for client.k8s
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-master1:/etc/ceph/
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-master2:/etc/ceph/
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-master3:/etc/ceph/
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-node1:/etc/ceph/
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-node2:/etc/ceph/
$ scp ceph.conf ceph.client.k8s.keyring root@k8s-node3:/etc/ceph/
在k8s节点查看
root@k8s-master1:~# apt install ceph-common
root@k8s-master1:~# ceph --user k8s -s
cluster:
id: f7be5fba-531a-41c2-ab87-97831d212358
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3
mgr: ceph-node2(active), standbys: ceph-node1
osd: 3 osds: 3 up, 3 in
rgw: 1 daemon active
data:
pools: 5 pools, 96 pgs
objects: 224 objects, 1.3 KiB
usage: 3.0 GiB used, 27 GiB / 30 GiB avail
pgs: 96 active+clean
root@k8s-master1:~# rbd --user=k8s ls -p kube
podimg01
使用rbd作为存储卷
以keyring方式
root@k8s-master1:~# cd /opt/k8s-data/yaml/
root@k8s-master1:/opt/k8s-data/yaml# cd ceph/
root@k8s-master1:/opt/k8s-data/yaml/ceph# vim keyring.yaml
# Deployment that mounts a Ceph RBD image directly, authenticating with the
# node-local keyring file. Requires /etc/ceph/ceph.client.k8s.keyring to be
# present on every k8s node that may schedule this pod (copied via scp above).
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: kevin-keyring
  name: kevin-keyring-deployment
  namespace: kevin
spec:
  # RBD with fsType is ReadWriteOnce in practice: keep a single replica so two
  # pods never try to mount the same image read-write at the same time.
  replicas: 1
  selector:
    matchLabels:
      app: kevin-keyring
  template:
    metadata:
      labels:
        app: kevin-keyring
    spec:
      containers:
      - name: kevin-keyring-container
        image: busybox
        imagePullPolicy: Always
        # Keep the container alive for a day so we can exec in and inspect the mount.
        command: ["/bin/sh", "-c", "sleep 86400"]
        volumeMounts:
        - name: rbdpod
          mountPath: /data
      volumes:
      - name: rbdpod
        rbd:
          # Ceph monitor endpoints (the three ceph nodes deployed above).
          monitors:
          - '172.16.1.11:6789'
          - '172.16.1.12:6789'
          - '172.16.1.13:6789'
          pool: 'kube'
          image: podimg01
          fsType: ext4
          readOnly: false
          # cephx user created with `ceph auth get-or-create client.k8s`;
          # the kubelet reads this keyring file from the host, not the pod.
          user: k8s
          keyring: /etc/ceph/ceph.client.k8s.keyring
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl apply -f keyring.yaml
deployment.apps/kevin-keyring-deployment created
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl get pod -n kevin
NAME READY STATUS RESTARTS AGE
kevin-keyring-deployment-78ccd7b768-kw5qd 1/1 Running 0 2m18s
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl exec -it kevin-keyring-deployment-78ccd7b768-kw5qd -n kevin sh
/ # mount
......
/dev/rbd0 on /data type ext4 (rw,relatime,stripe=1024,data=ordered)
......
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl delete -f keyring.yaml
deployment.apps "kevin-keyring-deployment" deleted
以secret方式
在ceph集群获取secret
$ ceph auth print-key client.k8s|base64
QVFDVmxGZGZvV0JTSWhBQVhlZEEyeWZmWGRXTUhHZ2twcEIwM0E9PQ==
编辑yaml
root@k8s-master1:/opt/k8s-data/yaml/ceph# vim secrete.yml
---
# Secret carrying the cephx key for client.k8s (base64 of `ceph auth print-key`).
# Type kubernetes.io/rbd is required for the in-tree rbd volume plugin.
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kevin
type: "kubernetes.io/rbd"
data:
  key: QVFDVmxGZGZvV0JTSWhBQVhlZEEyeWZmWGRXTUhHZ2twcEIwM0E9PQ==
---
# Same Deployment as the keyring example, but authenticating through the
# Secret above instead of a keyring file on each node's filesystem.
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: kevin-keyring
  name: kevin-keyring-deployment
  namespace: kevin
spec:
  # Single replica: the RBD image can only be mounted read-write by one pod.
  replicas: 1
  selector:
    matchLabels:
      app: kevin-keyring
  template:
    metadata:
      labels:
        app: kevin-keyring
    spec:
      containers:
      - name: kevin-keyring-container
        image: busybox
        imagePullPolicy: Always
        # Keep the container alive so we can exec in and verify the mount.
        command: ["/bin/sh", "-c", "sleep 86400"]
        volumeMounts:
        - name: rbdpod
          mountPath: /data
      volumes:
      - name: rbdpod
        rbd:
          monitors:
          - '172.16.1.11:6789'
          - '172.16.1.12:6789'
          - '172.16.1.13:6789'
          pool: 'kube'
          image: podimg01
          fsType: ext4
          readOnly: false
          user: k8s
          # Reference the Secret (same namespace) instead of a node-local keyring.
          secretRef:
            name: ceph-secret
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl apply -f secrete.yml
secret/ceph-secret created
deployment.apps/kevin-keyring-deployment created
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl get secret -n kevin
NAME TYPE DATA AGE
ceph-secret kubernetes.io/rbd 1 4s
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl get pod -n kevin
NAME READY STATUS RESTARTS AGE
kevin-keyring-deployment-7cd58c5f7d-fvsjm 1/1 Running 0 29s
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl exec -it kevin-keyring-deployment-7cd58c5f7d-fvsjm -n kevin /bin/sh
/ # mount
......
/dev/rbd0 on /data type ext4 (rw,relatime,stripe=1024,data=ordered)
......
root@k8s-master1:/opt/k8s-data/yaml/ceph# kubectl delete -f secrete.yml
secret "ceph-secret" deleted
deployment.apps "kevin-keyring-deployment" deleted