Installing a ceph cluster in containers is simpler and easier to carry out than a bare-metal installation, but day-to-day operations can hold quite a few pitfalls. Below is a short walkthrough of the containerized setup.

1 Node planning

    admin 172.18.1.193

    node1 172.18.1.195

    node2 172.18.1.196
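
The scp and docker commands below address the other machines by hostname, so node1 and node2 must resolve from every node. A minimal sketch, assuming no internal DNS and the addresses above (append on all three hosts):

# name resolution for the three nodes (run on admin, node1 and node2)
cat >> /etc/hosts <<'EOF'
172.18.1.193 admin
172.18.1.195 node1
172.18.1.196 node2
EOF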

2 Pull the jewel version of the ceph image

[root@admin osd2]# docker pull docker.io/ceph/daemon:tag-build-master-jewel-centos-7
[root@admin osd2]# docker images|grep jew
docker.io/ceph/daemon       tag-build-master-jewel-centos-7   74723dc740be        7 weeks ago         677.5 MB
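
The mon and osd containers run on all three hosts, so the same image must also be present on node1 and node2. A quick sketch (assuming key-based ssh to the other nodes):

# pull the image on the other two nodes as well
ssh node1 "docker pull docker.io/ceph/daemon:tag-build-master-jewel-centos-7"
ssh node2 "docker pull docker.io/ceph/daemon:tag-build-master-jewel-centos-7"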

3 Run the mon container on the admin node

[root@admin osd2]# docker run -d --net=host --name=mon -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e MON_IP=172.18.1.193 -e CEPH_PUBLIC_NETWORK=172.18.1.0/24 docker.io/ceph/daemon:tag-build-master-jewel-centos-7 mon
3a12bbd4b81cfb2e3a234f08fb2c751ffa4e3482a2602733b5d4f7e8b934782e
# the mon container is up and running
[root@admin osd2]# docker ps|grep mon
3a12bbd4b81c        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh mon"     29 seconds ago      Up 28 seconds                                                  mon
# check the mon container logs (normally there is nothing wrong here, so the full output is not shown)
[root@admin osd2]# docker logs mon
creating /etc/ceph/ceph.client.admin.keyring
creating /etc/ceph/ceph.mon.keyring
creating /var/lib/ceph/bootstrap-osd/ceph.keyring
creating /var/lib/ceph/bootstrap-mds/ceph.keyring
creating /var/lib/ceph/bootstrap-rgw/ceph.keyring
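
At this point a single-mon cluster should already be responding; a quick sanity check before copying anything to the other nodes (the ceph CLI lives inside the mon container):

# should report exactly one monitor for now
docker exec mon ceph mon stat
docker exec mon ceph -s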

4 Copy the configuration and data files to the other two nodes, then start mon there in the same way

    This step is very important. If the configuration and data files generated when mon was set up on the admin node are not copied to the other nodes before starting mon there, the three nodes will each bring up their own separate ceph cluster instead of forming three mon nodes of one cluster.

[root@admin osd2]# scp -r /etc/ceph node1:/etc/
root@node1's password: 
ceph.conf                                                                                                                                100%  212     0.2KB/s   00:00    
ceph.client.admin.keyring                                                                                                                100%  159     0.2KB/s   00:00    
ceph.mon.keyring                                                                                                                         100%  575     0.6KB/s   00:00
[root@admin osd2]# scp -r /etc/ceph node2:/etc/
root@node2's password: 
ceph.conf                                                                                                                                100%  212     0.2KB/s   00:00    
ceph.client.admin.keyring                                                                                                                100%  159     0.2KB/s   00:00    
ceph.mon.keyring                                                                                                                         100%  575     0.6KB/s   00:00
# verify
[root@node1 osd4]# cd /etc/ceph/
[root@node1 ceph]# ll
total 12
-rw------- 1 root root 159 Apr 16 19:14 ceph.client.admin.keyring
-rw-r--r-- 1 root root 212 Apr 16 19:14 ceph.conf
-rw------- 1 root root 575 Apr 16 19:14 ceph.mon.keyring
[root@node2 /]# cd /etc/ceph/
[root@node2 ceph]# ll
total 12
-rw------- 1 root root 159 Apr 16 19:15 ceph.client.admin.keyring
-rw-r--r-- 1 root root 212 Apr 16 19:15 ceph.conf
-rw------- 1 root root 575 Apr 16 19:15 ceph.mon.keyring
[root@admin osd2]# scp -r /var/lib/ceph node1:/var/lib/
root@node1's password: 
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
LOCK                                                                                                                                     100%    0     0.0KB/s   00:00    
CURRENT                                                                                                                                  100%   16     0.0KB/s   00:00    
000005.sst                                                                                                                               100% 1080     1.1KB/s   00:00    
000006.log                                                                                                                               100%  192KB 192.0KB/s   00:00    
MANIFEST-000004                                                                                                                          100%   64KB  64.0KB/s   00:00    
keyring                                                                                                                                  100%   77     0.1KB/s   00:00 
[root@admin osd2]# scp -r /var/lib/ceph node2:/var/lib/
root@node2's password: 
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
ceph.keyring                                                                                                                             100%  113     0.1KB/s   00:00    
LOCK                                                                                                                                     100%    0     0.0KB/s   00:00    
CURRENT                                                                                                                                  100%   16     0.0KB/s   00:00    
000005.sst                                                                                                                               100% 1080     1.1KB/s   00:00    
000006.log                                                                                                                               100%  192KB 192.0KB/s   00:00    
MANIFEST-000004                                                                                                                          100%   64KB  64.0KB/s   00:00    
keyring                                                                                                                                  100%   77     0.1KB/s   00:00 
# verify
[root@node1 ceph]# cd /var/lib/ceph/
[root@node1 ceph]# ll
total 0
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-mds
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-osd
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-rgw
drwxr-xr-x 3 root root 23 Apr 16 19:17 mds
drwxr-xr-x 3 root root 23 Apr 16 19:17 mon
drwxr-xr-x 2 root root  6 Apr 16 19:17 osd
drwxr-xr-x 3 root root 27 Apr 16 19:17 radosgw
drwxr-xr-x 3 root root 27 Apr 16 19:17 tmp
[root@node2 ceph]# cd /var/lib/ceph/
[root@node2 ceph]# ll
total 0
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-mds
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-osd
drwxr-xr-x 2 root root 25 Apr 16 19:17 bootstrap-rgw
drwxr-xr-x 3 root root 23 Apr 16 19:17 mds
drwxr-xr-x 3 root root 23 Apr 16 19:17 mon
drwxr-xr-x 2 root root  6 Apr 16 19:17 osd
drwxr-xr-x 3 root root 27 Apr 16 19:17 radosgw
drwxr-xr-x 3 root root 27 Apr 16 19:17 tmp

Start mon on the other nodes. The command is exactly the same; only the MON_IP address changes for each node.

[root@node1 ceph]# docker run -d --net=host --name=mon -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e MON_IP=172.18.1.195 -e CEPH_PUBLIC_NETWORK=172.18.1.0/24 docker.io/ceph/daemon:tag-build-master-jewel-centos-7 mon
632e06c5735c927c80a974d84627184798f0e0becd78a87b20668dd07c024876
[root@node1 ceph]# docker ps|grep mon
632e06c5735c        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh mon"     About a minute ago   Up About a minute                                              mon
[root@node2 ceph]# docker run -d --net=host --name=mon -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e MON_IP=172.18.1.196 -e CEPH_PUBLIC_NETWORK=172.18.1.0/24 docker.io/ceph/daemon:tag-build-master-jewel-centos-7 mon
1f42acaf33b0fe499f3e8d560b58080971648bf3d50e441cdb9b35189b40390d
[root@node2 ceph]# docker ps|grep mon
1f42acaf33b0        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh mon"   33 seconds ago      Up 31 seconds                           mon

Check the ceph cluster status

[root@admin osd2]# docker exec mon ceph -s
    cluster d4ec799c-1f54-4441-b19c-cd14a6a8710b
     health HEALTH_ERR
            clock skew detected on mon.node2
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
            64 pgs stuck unclean
            no osds
            Monitor clock skew detected 
     monmap e3: 3 mons at {admin=172.18.1.193:6789/0,node1=172.18.1.195:6789/0,node2=172.18.1.196:6789/0}
            election epoch 6, quorum 0,1,2 admin,node1,node2
     osdmap e1: 0 osds: 0 up, 0 in
            flags sortbitwise,require_jewel_osds
      pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
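
Besides the missing osds, the status above also reports clock skew on mon.node2. Monitors tolerate only 0.05 s of drift by default (mon_clock_drift_allowed), so it is worth syncing time on all three nodes before going further. A sketch assuming the stock CentOS 7 ntp packages and a reachable public pool (substitute your own time source):

# run on admin, node1 and node2
yum install -y ntp
ntpdate pool.ntp.org
systemctl enable ntpd && systemctl start ntpd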

5 Mount the OSDs

    First prepare the OSD disks on the three nodes (partitioning, formatting, mounting, or LVM). The details are not covered here; a minimal sketch for one disk is shown right after this paragraph.
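
A minimal sketch, assuming a spare block device /dev/sdb on each node (the device name is an assumption) and the ceph/osdN LVM naming that the df output below shows:

# example: carve one 15G LV for osd1 on the admin node, format it and mount it
pvcreate /dev/sdb
vgcreate ceph /dev/sdb
lvcreate -L 15G -n osd1 ceph
mkfs.xfs /dev/mapper/ceph-osd1
mkdir -p /osd1
mount /dev/mapper/ceph-osd1 /osd1
echo "/dev/mapper/ceph-osd1 /osd1 xfs defaults 0 0" >> /etc/fstab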

[root@admin osd2]# df -Ph|grep osd
/dev/mapper/ceph-osd1     15G   33M   15G   1% /osd1
/dev/mapper/ceph-osd2     15G   33M   15G   1% /osd2
[root@node1 /]# df -Ph|grep osd
/dev/mapper/ceph-osd3     15G   33M   15G   1% /osd3
/dev/mapper/ceph-osd4     15G   33M   15G   1% /osd4
[root@node2 /]# df -Ph|grep osd
/dev/mapper/ceph-osd5     15G   33M   15G   1% /osd5
/dev/mapper/ceph-osd6     15G   33M   15G   1% /osd6

Mount the OSDs on each of the three nodes

#osd1
[root@admin osd2]# docker run -d --net=host --name=osd1 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd1:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
5e241e4fa243d5154ee9ac0982c4790d29f6df0ff820be76a812d676e134fc2c
#osd2
[root@admin osd2]# docker run -d --net=host --name=osd2 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd2:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
cd857e4cd32b0c745a16654df302732cd1a4e54de26fe43753f8b5e1aae43829
# check osd1 and osd2
[root@admin osd2]# docker ps|grep osd
cd857e4cd32b        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   About a minute ago   Up About a minute                                              osd2
5e241e4fa243        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   2 minutes ago        Up 2 minutes                                                   osd1

#osd3
[root@node1 /]# docker run -d --net=host --name=osd3 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd3:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
5b895ce07e313111c2a023ac8755cc6a0bee9bc36640b71d69436e6fcf555636
#osd4
[root@node1 /]# docker run -d --net=host --name=osd4 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd4:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
2e14db1a3049b57573fdc008a3f0051a02a1eacc10c5495ad996eebd51ad843c
# check osd3 and osd4
[root@node1 /]# docker ps|grep osd
2e14db1a3049        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   8 seconds ago       Up 7 seconds                                                   osd4
5b895ce07e31        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   33 seconds ago      Up 33 seconds                                                  osd3

#osd5
[root@node2 /]# docker run -d --net=host --name=osd5 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd5:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
7af739ebd117f07db85ea2f281054259b1bb31b19ca21238e9395675c0bbf56c
#osd6
[root@node2 /]# docker run -d --net=host --name=osd6 -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /dev:/dev -v /osd6:/var/lib/ceph/osd --privileged=true docker.io/ceph/daemon:tag-build-master-jewel-centos-7 osd_directory
79cf854b8f30d129ee5c61ff892b8c2e89df29b38adfa111cdebb90933c71c32
# check osd5 and osd6
[root@node2 /]# docker ps|grep osd
79cf854b8f30        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   22 seconds ago      Up 18 seconds                           osd6
7af739ebd117        docker.io/ceph/daemon:tag-build-master-jewel-centos-7   "/entrypoint.sh osd_d"   43 seconds ago      Up 41 seconds                           osd5

Check ceph health and the osd tree

[root@admin osd2]# docker exec mon ceph -s
    cluster d4ec799c-1f54-4441-b19c-cd14a6a8710b
     health HEALTH_ERR
            clock skew detected on mon.node2
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
            64 pgs stuck unclean
            too few PGs per OSD (10 < min 30)
            Monitor clock skew detected 
     monmap e3: 3 mons at {admin=172.18.1.193:6789/0,node1=172.18.1.195:6789/0,node2=172.18.1.196:6789/0}
            election epoch 6, quorum 0,1,2 admin,node1,node2
     osdmap e13: 6 osds: 6 up, 6 in
            flags sortbitwise,require_jewel_osds
      pgmap v46: 64 pgs, 1 pools, 0 bytes data, 0 objects
            794 MB used, 90905 MB / 91700 MB avail
                  64 creating
# the output above shows all 64 pgs stuck in the creating state: the default rbd pool has exactly 64 pgs, but because the osds are in an abnormal state the pgs cannot be created successfully
[root@admin osd2]# docker exec mon ceph osd pool get rbd pg_num
pg_num: 64
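# note: the earlier health output also warned "too few PGs per OSD (10 < min 30)".
# if that warning persists once the osds are healthy, the pool's pg count can be raised
# (pg_num can only be increased, never decreased); 128 is a plausible target for
# 6 osds with 3 replicas, adjust it to your own sizing
docker exec mon ceph osd pool set rbd pg_num 128
docker exec mon ceph osd pool set rbd pgp_num 128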
[root@admin osd2]# docker exec mon ceph osd tree
ID WEIGHT TYPE NAME    UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1      0 root default                                   
 0      0 osd.0             up  1.00000          1.00000 
 1      0 osd.1             up  1.00000          1.00000 
 2      0 osd.2             up  1.00000          1.00000 
 3      0 osd.3             up  1.00000          1.00000 
 4      0 osd.4             up  1.00000          1.00000 
 5      0 osd.5             up  1.00000          1.00000
# the output above shows that all osds were mounted and are up, but their weights are 0 and none of them is placed under a host; it looks like the crushmap was not populated when the osds were created
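
The manual repair below works, but as an alternative each osd can register its own crush location when it starts, via the osd crush update on start option (default true) together with an explicit osd crush location entry. The fragment below is only a sketch of jewel-era ceph.conf syntax, not taken from this setup, so verify the option names against your release:

# hypothetical ceph.conf fragment; set host= to each node's own hostname
[osd]
osd crush update on start = true
osd crush location = root=default host=admin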

6 Manually repair the crushmap

  6.1 Add the osds to the crushmap

[root@admin osd2]# docker exec mon ceph osd crush add osd.0 0.15 host=admin
add item id 0 name 'osd.0' weight 0.15 at location {host=admin} to crush map
[root@admin osd2]# docker exec mon ceph osd crush add osd.1 0.15 host=admin
add item id 1 name 'osd.1' weight 0.15 at location {host=admin} to crush map
[root@admin osd2]# docker exec mon ceph osd crush add osd.2 0.15 host=node1
add item id 2 name 'osd.2' weight 0.15 at location {host=node1} to crush map
[root@admin osd2]# docker exec mon ceph osd crush add osd.3 0.15 host=node1
add item id 3 name 'osd.3' weight 0.15 at location {host=node1} to crush map
[root@admin osd2]# docker exec mon ceph osd crush add osd.4 0.15 host=node2
add item id 4 name 'osd.4' weight 0.15 at location {host=node2} to crush map
[root@admin osd2]# docker exec mon ceph osd crush add osd.5 0.15 host=node2
add item id 5 name 'osd.5' weight 0.15 at location {host=node2} to crush map
[root@admin osd2]# docker exec mon ceph osd tree
ID WEIGHT  TYPE NAME    UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-4 0.29999 host node2                                     
 4 0.14999     osd.4         up  1.00000          1.00000 
 5 0.14999     osd.5         up  1.00000          1.00000 
-3 0.29999 host node1                                     
 2 0.14999     osd.2         up  1.00000          1.00000 
 3 0.14999     osd.3         up  1.00000          1.00000 
-2 0.29999 host admin                                     
 0 0.14999     osd.0         up  1.00000          1.00000 
 1 0.14999     osd.1         up  1.00000          1.00000 
-1       0 root default  
# the crushmap entries and osd weights are now there, but the three hosts are still not placed under root default

6.2 Update the crushmap so that all three hosts belong to root default

[root@admin osd2]# docker exec mon ceph osd crush move admin root=default
moved item id -2 name 'admin' to location {root=default} in crush map
[root@admin osd2]# docker exec mon ceph osd crush move node1 root=default
moved item id -3 name 'node1' to location {root=default} in crush map
[root@admin osd2]# docker exec mon ceph osd crush move node2 root=default
moved item id -4 name 'node2' to location {root=default} in crush map
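# re-check the tree; the three hosts should now appear under root default (output omitted)
docker exec mon ceph osd tree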

Check the ceph cluster status again

[root@admin osd2]# docker exec mon ceph -s
    cluster d4ec799c-1f54-4441-b19c-cd14a6a8710b
     health HEALTH_ERR
            clock skew detected on mon.node2
            4 pgs are stuck inactive for more than 300 seconds
            5 pgs degraded
            23 pgs peering
            4 pgs stuck inactive
            57 pgs stuck unclean
            5 pgs undersized
            Monitor clock skew detected 
     monmap e3: 3 mons at {admin=172.18.1.193:6789/0,node1=172.18.1.195:6789/0,node2=172.18.1.196:6789/0}
            election epoch 6, quorum 0,1,2 admin,node1,node2
     osdmap e29: 6 osds: 6 up, 6 in
            flags sortbitwise,require_jewel_osds
      pgmap v99: 64 pgs, 1 pools, 0 bytes data, 0 objects
            799 MB used, 90900 MB / 91700 MB avail
                  29 activating
                  15 remapped+peering
                   8 peering
                   7 active+clean
                   5 active+undersized+degraded
[root@admin osd2]# docker exec mon ceph -s
    cluster d4ec799c-1f54-4441-b19c-cd14a6a8710b
     health HEALTH_WARN
            clock skew detected on mon.node2
            16 pgs stuck unclean
            Monitor clock skew detected 
     monmap e3: 3 mons at {admin=172.18.1.193:6789/0,node1=172.18.1.195:6789/0,node2=172.18.1.196:6789/0}
            election epoch 6, quorum 0,1,2 admin,node1,node2
     osdmap e29: 6 osds: 6 up, 6 in
            flags sortbitwise,require_jewel_osds
      pgmap v104: 64 pgs, 1 pools, 0 bytes data, 0 objects
            799 MB used, 90900 MB / 91700 MB avail
                  48 active+clean
                  16 activating
[root@admin osd2]# docker exec mon ceph -s
    cluster d4ec799c-1f54-4441-b19c-cd14a6a8710b
     health HEALTH_WARN
            clock skew detected on mon.node2
            Monitor clock skew detected 
     monmap e3: 3 mons at {admin=172.18.1.193:6789/0,node1=172.18.1.195:6789/0,node2=172.18.1.196:6789/0}
            election epoch 6, quorum 0,1,2 admin,node1,node2
     osdmap e29: 6 osds: 6 up, 6 in
            flags sortbitwise,require_jewel_osds
      pgmap v108: 64 pgs, 1 pools, 0 bytes data, 0 objects
            799 MB used, 90900 MB / 91700 MB avail
                  64 active+clean

After the crushmap repair the pgs are created normally and become active+clean, and the cluster health has recovered (only the clock skew warning remains).

7 Test the ceph cluster

Test creating a block-storage image and uploading a file to the cluster; only if both succeed can the ceph installation be considered successful.

[root@admin osd2]# docker exec mon rbd create rbd/test-image --size 100M
[root@admin osd2]# docker exec mon rbd ls rbd
test-image
[root@admin osd2]# docker exec mon rbd info rbd/test-image
rbd image 'test-image':
	size 102400 kB in 25 objects
	order 22 (4096 kB objects)
	block_name_prefix: rbd_data.1083238e1f29
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	flags: 
# the block device image was created successfully
[root@admin osd2]# docker exec mon rados -p rbd put wzl /etc/fstab
[root@admin osd2]# docker exec mon rados -p rbd ls
rbd_header.1083238e1f29
wzl
rbd_object_map.1083238e1f29
rbd_directory
rbd_id.test-image
# the file was uploaded successfully
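
One caveat if the image will later be mapped with the CentOS 7 kernel rbd client: the jewel defaults enable features (exclusive-lock, object-map, fast-diff, deep-flatten) that older kernels do not support, so rbd map can fail with "image uses unsupported features". A minimal sketch of creating a kernel-friendly image with only the layering feature (the image name is just an example):

# create an image with only the layering feature enabled, for older kernel clients
docker exec mon rbd create rbd/kernel-image --size 100M --image-feature layering
docker exec mon rbd info rbd/kernel-image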

At this point, installing a ceph cluster with docker containers has succeeded.