Ceph分布式集群部署

1.Ceph环境准备

主机名 ip地址及磁盘 部署组件
node01 192.168.1.181(三块硬盘) Mon、mgr、rgw、osd
node02 192.168.1.182(三块硬盘) Mon、mgr、rgw、osd
node03 192.168.1.183(三块硬盘) Mon、mgr、rgw、osd

2.Hosts及防火墙设置

node01、node02、node03节点进行如下配置:

#添加hosts解析;

[root@node01 ~]# cat >/etc/hosts<<EOF
127.0.0.1 localhost localhost.localdomain
192.168.1.181 node01
192.168.1.182 node02
192.168.1.183 node03
EOF

#关闭selinux和防火墙（setenforce 0与systemctl stop为临时生效；sed修改配置文件与systemctl disable为永久生效，重启后仍然关闭）;

[root@node01 ~]# sed -i '/SELINUX/s/enforcing/disabled/g' /etc/sysconfig/selinux

[root@node01 ~]# setenforce 0

[root@node01 ~]# systemctl stop firewalld.service

[root@node01 ~]# systemctl disable firewalld.service

#同步节点时间

[root@node01 ~]# yum install ntpdate -y

[root@node01 ~]# ntpdate pool.ntp.org

3.配置Ceph国内基础、EPEL和ceph yum源

#配置epel yum源

[root@node01 ~]# yum install -y epel-release

#使用阿里开源镜像提供的epel源

[root@node01 ~]# cd /etc/yum.repos.d

[root@node01 ~]# wget -O /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo

#配置ceph yum源

[root@node01 ~]# echo "[noarch]

name=ceph noarch

baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch

enabled=1

gpgcheck=0

[x86_64]

name=ceph x86_64

baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64

enabled=1

gpgcheck=0" > /etc/yum.repos.d/ceph.repo

4.在部署机上安装ceph-deploy(2.0版本+)

[root@node01 ~]# yum -y install ceph-deploy python-setuptools

[root@node01 ~]# yum install ceph-deploy python-setuptools
Loaded plugins: fastestmirror, langpacks
Repository epel is listed more than once in the configuration
Repository epel-debuginfo is listed more than once in the configuration
Repository epel-source is listed more than once in the configuration
Loading mirror speeds from cached hostfile

 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirror.bit.edu.cn
   base                                                            | 3.6 kB  00:00:00     
   epel                                                            | 4.7 kB  00:00:00     
   extras                                                          | 2.9 kB  00:00:00     
   noarch                                                          | 1.5 kB  00:00:00     
   updates                                                         | 2.9 kB  00:00:00     
   x86_64                                                          | 1.5 kB  00:00:00     
   (1/3): epel/x86_64/group_gz                                     |  95 kB  00:00:00     
   (2/3): epel/x86_64/updateinfo                                   | 1.0 MB  00:00:00     
   (3/3): epel/x86_64/primary_db                                   | 6.9 MB  00:00:01     
   Package python-setuptools-0.9.8-7.el7.noarch already installed and latest version
   Resolving Dependencies
   --> Running transaction check
   ---> Package ceph-deploy.noarch 0:2.0.1-0 will be installed
   --> Finished Dependency Resolution

Dependencies Resolved

=======================================================================================

 Package                Arch              Version              Repository         Size
=======================================================================================

Installing:
 ceph-deploy            noarch            2.0.1-0              noarch            286 k

Transaction Summary
=======================================================================================

Install  1 Package

Total download size: 286 k
Installed size: 1.2 M
Is this ok [y/d/N]: y
Downloading packages:
ceph-deploy-2.0.1-0.noarch.rpm                                  | 286 kB  00:00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : ceph-deploy-2.0.1-0.noarch                                          1/1 
  Verifying  : ceph-deploy-2.0.1-0.noarch                                          1/1 

Installed:
  ceph-deploy.noarch 0:2.0.1-0                                                         

Complete!
[root@node01 ~]#
[root@node01 ~]# rpm -qa|grep ceph-deploy
ceph-deploy-2.0.1-0.noarch
[root@node01 ~]# rpm -qa|grep python-setuptools
python-setuptools-0.9.8-7.el7.noarch
[root@node01 ~]# 

5.更新其余节点的yum源

[root@node01 ~]# for host in node{02..03};do scp -r /etc/yum.repos.d/* $host:/etc/yum.repos.d/ ;done

[root@node01 ~]# for host in node{02..03};do scp -r /etc/yum.repos.d/* $host:/etc/yum.repos.d;done
The authenticity of host 'node02 (192.168.1.182)' can't be established.
ECDSA key fingerprint is SHA256:V1WcZifyTKUtNITjoIv89siGUFa/aRKqhVbuSZo44IA.
ECDSA key fingerprint is MD5:cb:08:52:c5:43:7c:fe:77:88:7e:cb:e3:4b:1b:26:ab.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node02,192.168.1.182' (ECDSA) to the list of known hosts.
root@node02's password: 
Permission denied, please try again.
root@node02's password: 
CentOS-Base.repo                                     100% 1664     2.1MB/s   00:00    
CentOS-CR.repo                                       100% 1309     1.5MB/s   00:00    
CentOS-Debuginfo.repo                                100%  649   741.5KB/s   00:00    
CentOS-fasttrack.repo                                100%  314   562.5KB/s   00:00    
CentOS-Media.repo                                    100%  630     1.0MB/s   00:00    
CentOS-Sources.repo                                  100% 1331     2.3MB/s   00:00    
CentOS-Vault.repo                                    100% 5701     8.8MB/s   00:00    
ceph.repo                                            100%  231   492.4KB/s   00:00    
epel-7.repo                                          100%  664     1.1MB/s   00:00    
epel.repo                                            100%  951     2.0MB/s   00:00    
epel-testing.repo                                    100% 1050     1.9MB/s   00:00    
The authenticity of host 'node03 (192.168.1.183)' can't be established.
ECDSA key fingerprint is SHA256:+J7KpBTku/CAcO+EjEyryEF0BuUaaDlKwxpimaJY0Qk.
ECDSA key fingerprint is MD5:b0:88:a3:db:50:1a:bd:6b:6b:0a:cf:7a:a2:81:38:c7.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node03,192.168.1.183' (ECDSA) to the list of known hosts.
root@node03's password: 
CentOS-Base.repo                                     100% 1664     2.0MB/s   00:00    
CentOS-CR.repo                                       100% 1309     1.4MB/s   00:00    
CentOS-Debuginfo.repo                                100%  649   555.0KB/s   00:00    
CentOS-fasttrack.repo                                100%  314   338.7KB/s   00:00    
CentOS-Media.repo                                    100%  630   892.6KB/s   00:00    
CentOS-Sources.repo                                  100% 1331     2.1MB/s   00:00    
CentOS-Vault.repo                                    100% 5701     7.3MB/s   00:00    
ceph.repo                                            100%  231   325.5KB/s   00:00    
epel-7.repo                                          100%  664     1.0MB/s   00:00    
epel.repo                                            100%  951     1.6MB/s   00:00    
epel-testing.repo                                    100% 1050     1.9MB/s   00:00    
[root@node01 ~]# 

6.在所有节点(node01、node02、node03)上安装相关包

[root@node01 ~]# for host in node{01..03};do ssh -l root $host yum install ceph ceph-radosgw -y ;done

[root@node01 ~]# ceph -v

[root@node01 ~]# rpm -qa |grep ceph

[root@node01 ~]# for host in node{01..03};do ssh -l root $host yum install ceph ceph-radosgw -y;done
The authenticity of host 'node01 (192.168.1.181)' can't be established.
ECDSA key fingerprint is SHA256:QfJ3r+TUqEFfG/D3a8YYN5twm0IT1vCI5OtIm28GX+M.
ECDSA key fingerprint is MD5:1f:36:26:8d:d8:63:81:9f:d6:30:a2:ed:ba:fb:db:ba.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node01,192.168.1.181' (ECDSA) to the list of known hosts.
root@node01's password: 
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
Repository epel is listed more than once in the configuration
Repository epel-debuginfo is listed more than once in the configuration
Repository epel-source is listed more than once in the configuration
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirror.bit.edu.cn
Resolving Dependencies
--> Running transaction check
---> Package ceph.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: ceph-mon = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-osd = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mds = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mgr = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
---> Package ceph-radosgw.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: librgw2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-selinux = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-base = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados.so.2(LIBRADOS_14.2.0)(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: liboath.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: libceph-common.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librabbitmq.so.4()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Running transaction check
---> Package ceph-base.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: libcephfs2 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: librbd1 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-common = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: libleveldb.so.1()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: liblttng-ust.so.0()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
---> Package ceph-mds.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-mgr.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-pecan for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cherrypy for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-werkzeug for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-bcrypt for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
---> Package ceph-mon.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-osd.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-selinux.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: selinux-policy-base >= 3.13.1-252.el7_7.6 for package: 2:ceph-selinux-14.2.12-0.el7.x86_64
---> Package liboath.x86_64 0:2.6.2-1.el7 will be installed
---> Package librabbitmq.x86_64 0:0.8.0-2.el7 will be installed
---> Package librados2.x86_64 1:10.2.5-4.el7 will be updated
---> Package librados2.x86_64 2:14.2.12-0.el7 will be an update
---> Package librgw2.x86_64 2:14.2.12-0.el7 will be installed
--> Running transaction check
---> Package ceph-common.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-rgw = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper1 = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-ceph-argparse = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rbd = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cephfs = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rados = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-prettytable for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace-ctf.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: librdkafka.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
---> Package leveldb.x86_64 0:1.12.0-11.el7 will be installed
---> Package libcephfs2.x86_64 2:14.2.12-0.el7 will be installed
---> Package librbd1.x86_64 1:10.2.5-4.el7 will be updated
---> Package librbd1.x86_64 2:14.2.12-0.el7 will be an update
---> Package lttng-ust.x86_64 0:2.4.1-4.el7 will be installed
--> Processing Dependency: liburcu-bp.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
--> Processing Dependency: liburcu-cds.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
---> Package python-cherrypy.noarch 0:3.2.2-4.el7 will be installed
---> Package python-pecan.noarch 0:0.4.5-2.el7 will be installed
--> Processing Dependency: python-mako >= 0.4.0 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-simplegeneric >= 0.8 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webob >= 1.2 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webtest >= 1.3.1 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-singledispatch for package: python-pecan-0.4.5-2.el7.noarch
---> Package python-werkzeug.noarch 0:0.9.1-2.el7 will be installed
---> Package python2-bcrypt.x86_64 0:3.1.6-2.el7 will be installed
--> Processing Dependency: python2-six for package: python2-bcrypt-3.1.6-2.el7.x86_64
---> Package selinux-policy-targeted.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1 will be an update
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Running transaction check
---> Package libbabeltrace.x86_64 0:1.2.4-3.el7 will be installed
---> Package libradosstriper1.x86_64 2:14.2.12-0.el7 will be installed
---> Package librdkafka.x86_64 0:0.11.5-1.el7 will be installed
---> Package python-ceph-argparse.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-cephfs.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-mako.noarch 0:0.8.1-2.el7 will be installed
--> Processing Dependency: python-markupsafe for package: python-mako-0.8.1-2.el7.noarch
--> Processing Dependency: python-beaker for package: python-mako-0.8.1-2.el7.noarch
---> Package python-prettytable.noarch 0:0.7.2-3.el7 will be installed
---> Package python-rados.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rbd.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rgw.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-simplegeneric.noarch 0:0.8-7.el7 will be installed
---> Package python-singledispatch.noarch 0:3.4.0.2-2.el7 will be installed
---> Package python-webob.noarch 0:1.2.3-7.el7 will be installed
---> Package python-webtest.noarch 0:1.3.4-6.el7 will be installed
---> Package python2-six.noarch 0:1.9.0-0.el7 will be installed
---> Package selinux-policy.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy.noarch 0:3.13.1-266.el7_8.1 will be an update
---> Package userspace-rcu.x86_64 0:0.7.16-1.el7 will be installed
--> Running transaction check
---> Package python-beaker.noarch 0:1.5.4-10.el7 will be installed
--> Processing Dependency: python-paste for package: python-beaker-1.5.4-10.el7.noarch
---> Package python-markupsafe.x86_64 0:0.11-10.el7 will be installed
--> Running transaction check
---> Package python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7 will be installed
--> Processing Dependency: python-tempita for package: python-paste-1.7.5.1-9.20111221hg1498.el7.noarch
--> Running transaction check
---> Package python-tempita.noarch 0:0.5.1-6.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                  Arch    Version                        Repository
                                                                           Size
================================================================================
Installing:
 ceph                     x86_64  2:14.2.12-0.el7                x86_64   3.0 k
 ceph-radosgw             x86_64  2:14.2.12-0.el7                x86_64   5.8 M
Installing for dependencies:
 ceph-base                x86_64  2:14.2.12-0.el7                x86_64   5.4 M
 ceph-common              x86_64  2:14.2.12-0.el7                x86_64    19 M
 ceph-mds                 x86_64  2:14.2.12-0.el7                x86_64   1.9 M
 ceph-mgr                 x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 ceph-mon                 x86_64  2:14.2.12-0.el7                x86_64   4.3 M
 ceph-osd                 x86_64  2:14.2.12-0.el7                x86_64    16 M
 ceph-selinux             x86_64  2:14.2.12-0.el7                x86_64    21 k
 leveldb                  x86_64  1.12.0-11.el7                  epel     161 k
 libbabeltrace            x86_64  1.2.4-3.el7                    epel     147 k
 libcephfs2               x86_64  2:14.2.12-0.el7                x86_64   500 k
 liboath                  x86_64  2.6.2-1.el7                    epel      51 k
 librabbitmq              x86_64  0.8.0-2.el7                    base      37 k
 libradosstriper1         x86_64  2:14.2.12-0.el7                x86_64   362 k
 librdkafka               x86_64  0.11.5-1.el7                   epel     368 k
 librgw2                  x86_64  2:14.2.12-0.el7                x86_64   5.1 M
 lttng-ust                x86_64  2.4.1-4.el7                    epel     176 k
 python-beaker            noarch  1.5.4-10.el7                   base      80 k
 python-ceph-argparse     x86_64  2:14.2.12-0.el7                x86_64    36 k
 python-cephfs            x86_64  2:14.2.12-0.el7                x86_64   112 k
 python-cherrypy          noarch  3.2.2-4.el7                    base     422 k
 python-mako              noarch  0.8.1-2.el7                    base     307 k
 python-markupsafe        x86_64  0.11-10.el7                    base      25 k
 python-paste             noarch  1.7.5.1-9.20111221hg1498.el7   base     866 k
 python-pecan             noarch  0.4.5-2.el7                    epel     255 k
 python-prettytable       noarch  0.7.2-3.el7                    base      37 k
 python-rados             x86_64  2:14.2.12-0.el7                x86_64   200 k
 python-rbd               x86_64  2:14.2.12-0.el7                x86_64   195 k
 python-rgw               x86_64  2:14.2.12-0.el7                x86_64    78 k
 python-simplegeneric     noarch  0.8-7.el7                      epel      12 k
 python-singledispatch    noarch  3.4.0.2-2.el7                  epel      18 k
 python-tempita           noarch  0.5.1-6.el7                    base      33 k
 python-webob             noarch  1.2.3-7.el7                    base     202 k
 python-webtest           noarch  1.3.4-6.el7                    base     102 k
 python-werkzeug          noarch  0.9.1-2.el7                    extras   562 k
 python2-bcrypt           x86_64  3.1.6-2.el7                    epel      39 k
 python2-six              noarch  1.9.0-0.el7                    epel     2.9 k
 userspace-rcu            x86_64  0.7.16-1.el7                   epel      73 k
Updating for dependencies:
 librados2                x86_64  2:14.2.12-0.el7                x86_64   3.4 M
 librbd1                  x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 selinux-policy           noarch  3.13.1-266.el7_8.1             updates  497 k
 selinux-policy-targeted  noarch  3.13.1-266.el7_8.1             updates  7.0 M

Transaction Summary
================================================================================
Install  2 Packages (+37 Dependent packages)
Upgrade             (  4 Dependent packages)

Total size: 77 M
Total download size: 70 M
Downloading packages:
No Presto metadata available for x86_64
--------------------------------------------------------------------------------
Total                                              4.3 MB/s |  70 MB  00:16     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : leveldb-1.12.0-11.el7.x86_64                                1/47 
  Installing : liboath-2.6.2-1.el7.x86_64                                  2/47 
  Installing : librabbitmq-0.8.0-2.el7.x86_64                              3/47 
  Installing : python-webob-1.2.3-7.el7.noarch                             4/47 
  Installing : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                 5/47 
  Installing : python-webtest-1.3.4-6.el7.noarch                           6/47 
  Installing : python-tempita-0.5.1-6.el7.noarch                           7/47 
  Installing : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch            8/47 
  Installing : python-beaker-1.5.4-10.el7.noarch                           9/47 
  Installing : python-cherrypy-3.2.2-4.el7.noarch                         10/47 
  Installing : python-werkzeug-0.9.1-2.el7.noarch                         11/47 
  Installing : librdkafka-0.11.5-1.el7.x86_64                             12/47 
  Installing : python-markupsafe-0.11-10.el7.x86_64                       13/47 
  Installing : python-mako-0.8.1-2.el7.noarch                             14/47 
  Installing : python-singledispatch-3.4.0.2-2.el7.noarch                 15/47 
  Updating   : selinux-policy-3.13.1-266.el7_8.1.noarch                   16/47 
  Updating   : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          17/47 
  Installing : libbabeltrace-1.2.4-3.el7.x86_64                           18/47 
  Installing : python-simplegeneric-0.8-7.el7.noarch                      19/47 
  Installing : python-pecan-0.4.5-2.el7.noarch                            20/47 
  Installing : userspace-rcu-0.7.16-1.el7.x86_64                          21/47 
  Installing : lttng-ust-2.4.1-4.el7.x86_64                               22/47 
  Updating   : 2:librados2-14.2.12-0.el7.x86_64                           23/47 
  Installing : 2:python-rados-14.2.12-0.el7.x86_64                        24/47 
  Installing : 2:librgw2-14.2.12-0.el7.x86_64                             25/47 
  Installing : 2:libcephfs2-14.2.12-0.el7.x86_64                          26/47 
  Updating   : 2:librbd1-14.2.12-0.el7.x86_64                             27/47 
  Installing : 2:python-rbd-14.2.12-0.el7.x86_64                          28/47 
  Installing : 2:python-cephfs-14.2.12-0.el7.x86_64                       29/47 
  Installing : 2:python-rgw-14.2.12-0.el7.x86_64                          30/47 
  Installing : 2:libradosstriper1-14.2.12-0.el7.x86_64                    31/47 
  Installing : python-prettytable-0.7.2-3.el7.noarch                      32/47 
  Installing : 2:ceph-common-14.2.12-0.el7.x86_64                         33/47 
  Installing : 2:ceph-base-14.2.12-0.el7.x86_64                           34/47 
  Installing : 2:ceph-selinux-14.2.12-0.el7.x86_64                        35/47 
  Installing : 2:ceph-mon-14.2.12-0.el7.x86_64                            36/47 
  Installing : 2:ceph-osd-14.2.12-0.el7.x86_64                            37/47 
  Installing : 2:ceph-mds-14.2.12-0.el7.x86_64                            38/47 
  Installing : python2-six-1.9.0-0.el7.noarch                             39/47 
  Installing : python2-bcrypt-3.1.6-2.el7.x86_64                          40/47 
  Installing : 2:ceph-mgr-14.2.12-0.el7.x86_64                            41/47 
  Installing : 2:ceph-14.2.12-0.el7.x86_64                                42/47 
  Installing : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        43/47 
  Cleanup    : selinux-policy-targeted-3.13.1-229.el7.noarch              44/47 
  Cleanup    : 1:librbd1-10.2.5-4.el7.x86_64                              45/47 
  Cleanup    : selinux-policy-3.13.1-229.el7.noarch                       46/47 
  Cleanup    : 1:librados2-10.2.5-4.el7.x86_64                            47/47 
  Verifying  : liboath-2.6.2-1.el7.x86_64                                  1/47 
  Verifying  : python2-six-1.9.0-0.el7.noarch                              2/47 
  Verifying  : 2:python-rbd-14.2.12-0.el7.x86_64                           3/47 
  Verifying  : python-prettytable-0.7.2-3.el7.noarch                       4/47 
  Verifying  : leveldb-1.12.0-11.el7.x86_64                                5/47 
  Verifying  : python2-bcrypt-3.1.6-2.el7.x86_64                           6/47 
  Verifying  : userspace-rcu-0.7.16-1.el7.x86_64                           7/47 
  Verifying  : python-simplegeneric-0.8-7.el7.noarch                       8/47 
  Verifying  : 2:python-cephfs-14.2.12-0.el7.x86_64                        9/47 
  Verifying  : libbabeltrace-1.2.4-3.el7.x86_64                           10/47 
  Verifying  : python-mako-0.8.1-2.el7.noarch                             11/47 
  Verifying  : 2:ceph-selinux-14.2.12-0.el7.x86_64                        12/47 
  Verifying  : selinux-policy-3.13.1-266.el7_8.1.noarch                   13/47 
  Verifying  : 2:librgw2-14.2.12-0.el7.x86_64                             14/47 
  Verifying  : 2:ceph-mon-14.2.12-0.el7.x86_64                            15/47 
  Verifying  : python-pecan-0.4.5-2.el7.noarch                            16/47 
  Verifying  : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        17/47 
  Verifying  : python-singledispatch-3.4.0.2-2.el7.noarch                 18/47 
  Verifying  : python-markupsafe-0.11-10.el7.x86_64                       19/47 
  Verifying  : 2:ceph-common-14.2.12-0.el7.x86_64                         20/47 
  Verifying  : 2:libcephfs2-14.2.12-0.el7.x86_64                          21/47 
  Verifying  : librdkafka-0.11.5-1.el7.x86_64                             22/47 
  Verifying  : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch           23/47 
  Verifying  : lttng-ust-2.4.1-4.el7.x86_64                               24/47 
  Verifying  : 2:ceph-osd-14.2.12-0.el7.x86_64                            25/47 
  Verifying  : python-werkzeug-0.9.1-2.el7.noarch                         26/47 
  Verifying  : 2:python-rados-14.2.12-0.el7.x86_64                        27/47 
  Verifying  : 2:libradosstriper1-14.2.12-0.el7.x86_64                    28/47 
  Verifying  : 2:ceph-base-14.2.12-0.el7.x86_64                           29/47 
  Verifying  : 2:ceph-mgr-14.2.12-0.el7.x86_64                            30/47 
  Verifying  : python-cherrypy-3.2.2-4.el7.noarch                         31/47 
  Verifying  : python-beaker-1.5.4-10.el7.noarch                          32/47 
  Verifying  : 2:python-rgw-14.2.12-0.el7.x86_64                          33/47 
  Verifying  : 2:librados2-14.2.12-0.el7.x86_64                           34/47 
  Verifying  : 2:ceph-mds-14.2.12-0.el7.x86_64                            35/47 
  Verifying  : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          36/47 
  Verifying  : python-tempita-0.5.1-6.el7.noarch                          37/47 
  Verifying  : python-webtest-1.3.4-6.el7.noarch                          38/47 
  Verifying  : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                39/47 
  Verifying  : librabbitmq-0.8.0-2.el7.x86_64                             40/47 
  Verifying  : 2:librbd1-14.2.12-0.el7.x86_64                             41/47 
  Verifying  : python-webob-1.2.3-7.el7.noarch                            42/47 
  Verifying  : 2:ceph-14.2.12-0.el7.x86_64                                43/47 
  Verifying  : 1:librbd1-10.2.5-4.el7.x86_64                              44/47 
  Verifying  : selinux-policy-3.13.1-229.el7.noarch                       45/47 
  Verifying  : selinux-policy-targeted-3.13.1-229.el7.noarch              46/47 
  Verifying  : 1:librados2-10.2.5-4.el7.x86_64                            47/47 

Installed:
  ceph.x86_64 2:14.2.12-0.el7        ceph-radosgw.x86_64 2:14.2.12-0.el7       

Dependency Installed:
  ceph-base.x86_64 2:14.2.12-0.el7                                              
  ceph-common.x86_64 2:14.2.12-0.el7                                            
  ceph-mds.x86_64 2:14.2.12-0.el7                                               
  ceph-mgr.x86_64 2:14.2.12-0.el7                                               
  ceph-mon.x86_64 2:14.2.12-0.el7                                               
  ceph-osd.x86_64 2:14.2.12-0.el7                                               
  ceph-selinux.x86_64 2:14.2.12-0.el7                                           
  leveldb.x86_64 0:1.12.0-11.el7                                                
  libbabeltrace.x86_64 0:1.2.4-3.el7                                            
  libcephfs2.x86_64 2:14.2.12-0.el7                                             
  liboath.x86_64 0:2.6.2-1.el7                                                  
  librabbitmq.x86_64 0:0.8.0-2.el7                                              
  libradosstriper1.x86_64 2:14.2.12-0.el7                                       
  librdkafka.x86_64 0:0.11.5-1.el7                                              
  librgw2.x86_64 2:14.2.12-0.el7                                                
  lttng-ust.x86_64 0:2.4.1-4.el7                                                
  python-beaker.noarch 0:1.5.4-10.el7                                           
  python-ceph-argparse.x86_64 2:14.2.12-0.el7                                   
  python-cephfs.x86_64 2:14.2.12-0.el7                                          
  python-cherrypy.noarch 0:3.2.2-4.el7                                          
  python-mako.noarch 0:0.8.1-2.el7                                              
  python-markupsafe.x86_64 0:0.11-10.el7                                        
  python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7                            
  python-pecan.noarch 0:0.4.5-2.el7                                             
  python-prettytable.noarch 0:0.7.2-3.el7                                       
  python-rados.x86_64 2:14.2.12-0.el7                                           
  python-rbd.x86_64 2:14.2.12-0.el7                                             
  python-rgw.x86_64 2:14.2.12-0.el7                                             
  python-simplegeneric.noarch 0:0.8-7.el7                                       
  python-singledispatch.noarch 0:3.4.0.2-2.el7                                  
  python-tempita.noarch 0:0.5.1-6.el7                                           
  python-webob.noarch 0:1.2.3-7.el7                                             
  python-webtest.noarch 0:1.3.4-6.el7                                           
  python-werkzeug.noarch 0:0.9.1-2.el7                                          
  python2-bcrypt.x86_64 0:3.1.6-2.el7                                           
  python2-six.noarch 0:1.9.0-0.el7                                              
  userspace-rcu.x86_64 0:0.7.16-1.el7                                           

Dependency Updated:
  librados2.x86_64 2:14.2.12-0.el7                                              
  librbd1.x86_64 2:14.2.12-0.el7                                                
  selinux-policy.noarch 0:3.13.1-266.el7_8.1                                    
  selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1                           

Complete!
root@node02's password: 
Loaded plugins: fastestmirror, langpacks, product-id, search-disabled-repos,
              : subscription-manager

This system is not registered with an entitlement server. You can use subscription-manager to register.

Repository epel is listed more than once in the configuration
Repository epel-debuginfo is listed more than once in the configuration
Repository epel-source is listed more than once in the configuration
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.bfsu.edu.cn
 * updates: mirror.bit.edu.cn
Resolving Dependencies
--> Running transaction check
---> Package ceph.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: ceph-mon = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-osd = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mds = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mgr = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
---> Package ceph-radosgw.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: librgw2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-selinux = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-base = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados.so.2(LIBRADOS_14.2.0)(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: liboath.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: libceph-common.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librabbitmq.so.4()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Running transaction check
---> Package ceph-base.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: libcephfs2 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: librbd1 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-common = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: libleveldb.so.1()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: liblttng-ust.so.0()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
---> Package ceph-mds.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-mgr.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-pecan for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cherrypy for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-werkzeug for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-bcrypt for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
---> Package ceph-mon.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-osd.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-selinux.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: selinux-policy-base >= 3.13.1-252.el7_7.6 for package: 2:ceph-selinux-14.2.12-0.el7.x86_64
---> Package liboath.x86_64 0:2.6.2-1.el7 will be installed
---> Package librabbitmq.x86_64 0:0.8.0-2.el7 will be installed
---> Package librados2.x86_64 1:10.2.5-4.el7 will be updated
---> Package librados2.x86_64 2:14.2.12-0.el7 will be an update
---> Package librgw2.x86_64 2:14.2.12-0.el7 will be installed
--> Running transaction check
---> Package ceph-common.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-rgw = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper1 = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-ceph-argparse = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rbd = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cephfs = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rados = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-prettytable for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace-ctf.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: librdkafka.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
---> Package leveldb.x86_64 0:1.12.0-11.el7 will be installed
---> Package libcephfs2.x86_64 2:14.2.12-0.el7 will be installed
---> Package librbd1.x86_64 1:10.2.5-4.el7 will be updated
---> Package librbd1.x86_64 2:14.2.12-0.el7 will be an update
---> Package lttng-ust.x86_64 0:2.4.1-4.el7 will be installed
--> Processing Dependency: liburcu-bp.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
--> Processing Dependency: liburcu-cds.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
---> Package python-cherrypy.noarch 0:3.2.2-4.el7 will be installed
---> Package python-pecan.noarch 0:0.4.5-2.el7 will be installed
--> Processing Dependency: python-mako >= 0.4.0 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-simplegeneric >= 0.8 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webob >= 1.2 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webtest >= 1.3.1 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-singledispatch for package: python-pecan-0.4.5-2.el7.noarch
---> Package python-werkzeug.noarch 0:0.9.1-2.el7 will be installed
---> Package python2-bcrypt.x86_64 0:3.1.6-2.el7 will be installed
--> Processing Dependency: python2-six for package: python2-bcrypt-3.1.6-2.el7.x86_64
---> Package selinux-policy-targeted.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1 will be an update
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Running transaction check
---> Package libbabeltrace.x86_64 0:1.2.4-3.el7 will be installed
---> Package libradosstriper1.x86_64 2:14.2.12-0.el7 will be installed
---> Package librdkafka.x86_64 0:0.11.5-1.el7 will be installed
---> Package python-ceph-argparse.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-cephfs.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-mako.noarch 0:0.8.1-2.el7 will be installed
--> Processing Dependency: python-markupsafe for package: python-mako-0.8.1-2.el7.noarch
--> Processing Dependency: python-beaker for package: python-mako-0.8.1-2.el7.noarch
---> Package python-prettytable.noarch 0:0.7.2-3.el7 will be installed
---> Package python-rados.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rbd.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rgw.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-simplegeneric.noarch 0:0.8-7.el7 will be installed
---> Package python-singledispatch.noarch 0:3.4.0.2-2.el7 will be installed
---> Package python-webob.noarch 0:1.2.3-7.el7 will be installed
---> Package python-webtest.noarch 0:1.3.4-6.el7 will be installed
---> Package python2-six.noarch 0:1.9.0-0.el7 will be installed
---> Package selinux-policy.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy.noarch 0:3.13.1-266.el7_8.1 will be an update
---> Package userspace-rcu.x86_64 0:0.7.16-1.el7 will be installed
--> Running transaction check
---> Package python-beaker.noarch 0:1.5.4-10.el7 will be installed
--> Processing Dependency: python-paste for package: python-beaker-1.5.4-10.el7.noarch
---> Package python-markupsafe.x86_64 0:0.11-10.el7 will be installed
--> Running transaction check
---> Package python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7 will be installed
--> Processing Dependency: python-tempita for package: python-paste-1.7.5.1-9.20111221hg1498.el7.noarch
--> Running transaction check
---> Package python-tempita.noarch 0:0.5.1-6.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                  Arch    Version                        Repository
                                                                           Size
================================================================================
Installing:
 ceph                     x86_64  2:14.2.12-0.el7                x86_64   3.0 k
 ceph-radosgw             x86_64  2:14.2.12-0.el7                x86_64   5.8 M
Installing for dependencies:
 ceph-base                x86_64  2:14.2.12-0.el7                x86_64   5.4 M
 ceph-common              x86_64  2:14.2.12-0.el7                x86_64    19 M
 ceph-mds                 x86_64  2:14.2.12-0.el7                x86_64   1.9 M
 ceph-mgr                 x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 ceph-mon                 x86_64  2:14.2.12-0.el7                x86_64   4.3 M
 ceph-osd                 x86_64  2:14.2.12-0.el7                x86_64    16 M
 ceph-selinux             x86_64  2:14.2.12-0.el7                x86_64    21 k
 leveldb                  x86_64  1.12.0-11.el7                  epel     161 k
 libbabeltrace            x86_64  1.2.4-3.el7                    epel     147 k
 libcephfs2               x86_64  2:14.2.12-0.el7                x86_64   500 k
 liboath                  x86_64  2.6.2-1.el7                    epel      51 k
 librabbitmq              x86_64  0.8.0-2.el7                    base      37 k
 libradosstriper1         x86_64  2:14.2.12-0.el7                x86_64   362 k
 librdkafka               x86_64  0.11.5-1.el7                   epel     368 k
 librgw2                  x86_64  2:14.2.12-0.el7                x86_64   5.1 M
 lttng-ust                x86_64  2.4.1-4.el7                    epel     176 k
 python-beaker            noarch  1.5.4-10.el7                   base      80 k
 python-ceph-argparse     x86_64  2:14.2.12-0.el7                x86_64    36 k
 python-cephfs            x86_64  2:14.2.12-0.el7                x86_64   112 k
 python-cherrypy          noarch  3.2.2-4.el7                    base     422 k
 python-mako              noarch  0.8.1-2.el7                    base     307 k
 python-markupsafe        x86_64  0.11-10.el7                    base      25 k
 python-paste             noarch  1.7.5.1-9.20111221hg1498.el7   base     866 k
 python-pecan             noarch  0.4.5-2.el7                    epel     255 k
 python-prettytable       noarch  0.7.2-3.el7                    base      37 k
 python-rados             x86_64  2:14.2.12-0.el7                x86_64   200 k
 python-rbd               x86_64  2:14.2.12-0.el7                x86_64   195 k
 python-rgw               x86_64  2:14.2.12-0.el7                x86_64    78 k
 python-simplegeneric     noarch  0.8-7.el7                      epel      12 k
 python-singledispatch    noarch  3.4.0.2-2.el7                  epel      18 k
 python-tempita           noarch  0.5.1-6.el7                    base      33 k
 python-webob             noarch  1.2.3-7.el7                    base     202 k
 python-webtest           noarch  1.3.4-6.el7                    base     102 k
 python-werkzeug          noarch  0.9.1-2.el7                    extras   562 k
 python2-bcrypt           x86_64  3.1.6-2.el7                    epel      39 k
 python2-six              noarch  1.9.0-0.el7                    epel     2.9 k
 userspace-rcu            x86_64  0.7.16-1.el7                   epel      73 k
Updating for dependencies:
 librados2                x86_64  2:14.2.12-0.el7                x86_64   3.4 M
 librbd1                  x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 selinux-policy           noarch  3.13.1-266.el7_8.1             updates  497 k
 selinux-policy-targeted  noarch  3.13.1-266.el7_8.1             updates  7.0 M

Transaction Summary
================================================================================
Install  2 Packages (+37 Dependent packages)
Upgrade             (  4 Dependent packages)

Total download size: 77 M
Downloading packages:
No Presto metadata available for x86_64
No Presto metadata available for updates
--------------------------------------------------------------------------------
Total                                              6.2 MB/s |  77 MB  00:12     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : leveldb-1.12.0-11.el7.x86_64                                1/47 
  Installing : liboath-2.6.2-1.el7.x86_64                                  2/47 
  Installing : librabbitmq-0.8.0-2.el7.x86_64                              3/47 
  Installing : python-webob-1.2.3-7.el7.noarch                             4/47 
  Installing : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                 5/47 
  Installing : python-webtest-1.3.4-6.el7.noarch                           6/47 
  Installing : python-tempita-0.5.1-6.el7.noarch                           7/47 
  Installing : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch            8/47 
  Installing : python-beaker-1.5.4-10.el7.noarch                           9/47 
  Installing : python-cherrypy-3.2.2-4.el7.noarch                         10/47 
  Installing : python-werkzeug-0.9.1-2.el7.noarch                         11/47 
  Installing : librdkafka-0.11.5-1.el7.x86_64                             12/47 
  Installing : python-markupsafe-0.11-10.el7.x86_64                       13/47 
  Installing : python-mako-0.8.1-2.el7.noarch                             14/47 
  Installing : python-singledispatch-3.4.0.2-2.el7.noarch                 15/47 
  Updating   : selinux-policy-3.13.1-266.el7_8.1.noarch                   16/47 
  Updating   : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          17/47 
  Installing : libbabeltrace-1.2.4-3.el7.x86_64                           18/47 
  Installing : python-simplegeneric-0.8-7.el7.noarch                      19/47 
  Installing : python-pecan-0.4.5-2.el7.noarch                            20/47 
  Installing : userspace-rcu-0.7.16-1.el7.x86_64                          21/47 
  Installing : lttng-ust-2.4.1-4.el7.x86_64                               22/47 
  Updating   : 2:librados2-14.2.12-0.el7.x86_64                           23/47 
  Installing : 2:python-rados-14.2.12-0.el7.x86_64                        24/47 
  Installing : 2:librgw2-14.2.12-0.el7.x86_64                             25/47 
  Installing : 2:libcephfs2-14.2.12-0.el7.x86_64                          26/47 
  Updating   : 2:librbd1-14.2.12-0.el7.x86_64                             27/47 
  Installing : 2:python-rbd-14.2.12-0.el7.x86_64                          28/47 
  Installing : 2:python-cephfs-14.2.12-0.el7.x86_64                       29/47 
  Installing : 2:python-rgw-14.2.12-0.el7.x86_64                          30/47 
  Installing : 2:libradosstriper1-14.2.12-0.el7.x86_64                    31/47 
  Installing : python-prettytable-0.7.2-3.el7.noarch                      32/47 
  Installing : 2:ceph-common-14.2.12-0.el7.x86_64                         33/47 
  Installing : 2:ceph-base-14.2.12-0.el7.x86_64                           34/47 
  Installing : 2:ceph-selinux-14.2.12-0.el7.x86_64                        35/47 
  Installing : 2:ceph-mon-14.2.12-0.el7.x86_64                            36/47 
  Installing : 2:ceph-osd-14.2.12-0.el7.x86_64                            37/47 
  Installing : 2:ceph-mds-14.2.12-0.el7.x86_64                            38/47 
  Installing : python2-six-1.9.0-0.el7.noarch                             39/47 
  Installing : python2-bcrypt-3.1.6-2.el7.x86_64                          40/47 
  Installing : 2:ceph-mgr-14.2.12-0.el7.x86_64                            41/47 
  Installing : 2:ceph-14.2.12-0.el7.x86_64                                42/47 
  Installing : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        43/47 
  Cleanup    : selinux-policy-targeted-3.13.1-229.el7.noarch              44/47 
  Cleanup    : 1:librbd1-10.2.5-4.el7.x86_64                              45/47 
  Cleanup    : selinux-policy-3.13.1-229.el7.noarch                       46/47 
  Cleanup    : 1:librados2-10.2.5-4.el7.x86_64                            47/47 
  Verifying  : liboath-2.6.2-1.el7.x86_64                                  1/47 
  Verifying  : python2-six-1.9.0-0.el7.noarch                              2/47 
  Verifying  : 2:python-rbd-14.2.12-0.el7.x86_64                           3/47 
  Verifying  : python-prettytable-0.7.2-3.el7.noarch                       4/47 
  Verifying  : leveldb-1.12.0-11.el7.x86_64                                5/47 
  Verifying  : python2-bcrypt-3.1.6-2.el7.x86_64                           6/47 
  Verifying  : userspace-rcu-0.7.16-1.el7.x86_64                           7/47 
  Verifying  : python-simplegeneric-0.8-7.el7.noarch                       8/47 
  Verifying  : 2:python-cephfs-14.2.12-0.el7.x86_64                        9/47 
  Verifying  : libbabeltrace-1.2.4-3.el7.x86_64                           10/47 
  Verifying  : python-mako-0.8.1-2.el7.noarch                             11/47 
  Verifying  : 2:ceph-selinux-14.2.12-0.el7.x86_64                        12/47 
  Verifying  : selinux-policy-3.13.1-266.el7_8.1.noarch                   13/47 
  Verifying  : 2:librgw2-14.2.12-0.el7.x86_64                             14/47 
  Verifying  : 2:ceph-mon-14.2.12-0.el7.x86_64                            15/47 
  Verifying  : python-pecan-0.4.5-2.el7.noarch                            16/47 
  Verifying  : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        17/47 
  Verifying  : python-singledispatch-3.4.0.2-2.el7.noarch                 18/47 
  Verifying  : python-markupsafe-0.11-10.el7.x86_64                       19/47 
  Verifying  : 2:ceph-common-14.2.12-0.el7.x86_64                         20/47 
  Verifying  : 2:libcephfs2-14.2.12-0.el7.x86_64                          21/47 
  Verifying  : librdkafka-0.11.5-1.el7.x86_64                             22/47 
  Verifying  : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch           23/47 
  Verifying  : lttng-ust-2.4.1-4.el7.x86_64                               24/47 
  Verifying  : 2:ceph-osd-14.2.12-0.el7.x86_64                            25/47 
  Verifying  : python-werkzeug-0.9.1-2.el7.noarch                         26/47 
  Verifying  : 2:python-rados-14.2.12-0.el7.x86_64                        27/47 
  Verifying  : 2:libradosstriper1-14.2.12-0.el7.x86_64                    28/47 
  Verifying  : 2:ceph-base-14.2.12-0.el7.x86_64                           29/47 
  Verifying  : 2:ceph-mgr-14.2.12-0.el7.x86_64                            30/47 
  Verifying  : python-cherrypy-3.2.2-4.el7.noarch                         31/47 
  Verifying  : python-beaker-1.5.4-10.el7.noarch                          32/47 
  Verifying  : 2:python-rgw-14.2.12-0.el7.x86_64                          33/47 
  Verifying  : 2:librados2-14.2.12-0.el7.x86_64                           34/47 
  Verifying  : 2:ceph-mds-14.2.12-0.el7.x86_64                            35/47 
  Verifying  : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          36/47 
  Verifying  : python-tempita-0.5.1-6.el7.noarch                          37/47 
  Verifying  : python-webtest-1.3.4-6.el7.noarch                          38/47 
  Verifying  : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                39/47 
  Verifying  : librabbitmq-0.8.0-2.el7.x86_64                             40/47 
  Verifying  : 2:librbd1-14.2.12-0.el7.x86_64                             41/47 
  Verifying  : python-webob-1.2.3-7.el7.noarch                            42/47 
  Verifying  : 2:ceph-14.2.12-0.el7.x86_64                                43/47 
  Verifying  : 1:librbd1-10.2.5-4.el7.x86_64                              44/47 
  Verifying  : selinux-policy-3.13.1-229.el7.noarch                       45/47 
  Verifying  : selinux-policy-targeted-3.13.1-229.el7.noarch              46/47 
  Verifying  : 1:librados2-10.2.5-4.el7.x86_64                            47/47 

Installed:
  ceph.x86_64 2:14.2.12-0.el7        ceph-radosgw.x86_64 2:14.2.12-0.el7       

Dependency Installed:
  ceph-base.x86_64 2:14.2.12-0.el7                                              
  ceph-common.x86_64 2:14.2.12-0.el7                                            
  ceph-mds.x86_64 2:14.2.12-0.el7                                               
  ceph-mgr.x86_64 2:14.2.12-0.el7                                               
  ceph-mon.x86_64 2:14.2.12-0.el7                                               
  ceph-osd.x86_64 2:14.2.12-0.el7                                               
  ceph-selinux.x86_64 2:14.2.12-0.el7                                           
  leveldb.x86_64 0:1.12.0-11.el7                                                
  libbabeltrace.x86_64 0:1.2.4-3.el7                                            
  libcephfs2.x86_64 2:14.2.12-0.el7                                             
  liboath.x86_64 0:2.6.2-1.el7                                                  
  librabbitmq.x86_64 0:0.8.0-2.el7                                              
  libradosstriper1.x86_64 2:14.2.12-0.el7                                       
  librdkafka.x86_64 0:0.11.5-1.el7                                              
  librgw2.x86_64 2:14.2.12-0.el7                                                
  lttng-ust.x86_64 0:2.4.1-4.el7                                                
  python-beaker.noarch 0:1.5.4-10.el7                                           
  python-ceph-argparse.x86_64 2:14.2.12-0.el7                                   
  python-cephfs.x86_64 2:14.2.12-0.el7                                          
  python-cherrypy.noarch 0:3.2.2-4.el7                                          
  python-mako.noarch 0:0.8.1-2.el7                                              
  python-markupsafe.x86_64 0:0.11-10.el7                                        
  python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7                            
  python-pecan.noarch 0:0.4.5-2.el7                                             
  python-prettytable.noarch 0:0.7.2-3.el7                                       
  python-rados.x86_64 2:14.2.12-0.el7                                           
  python-rbd.x86_64 2:14.2.12-0.el7                                             
  python-rgw.x86_64 2:14.2.12-0.el7                                             
  python-simplegeneric.noarch 0:0.8-7.el7                                       
  python-singledispatch.noarch 0:3.4.0.2-2.el7                                  
  python-tempita.noarch 0:0.5.1-6.el7                                           
  python-webob.noarch 0:1.2.3-7.el7                                             
  python-webtest.noarch 0:1.3.4-6.el7                                           
  python-werkzeug.noarch 0:0.9.1-2.el7                                          
  python2-bcrypt.x86_64 0:3.1.6-2.el7                                           
  python2-six.noarch 0:1.9.0-0.el7                                              
  userspace-rcu.x86_64 0:0.7.16-1.el7                                           

Dependency Updated:
  librados2.x86_64 2:14.2.12-0.el7                                              
  librbd1.x86_64 2:14.2.12-0.el7                                                
  selinux-policy.noarch 0:3.13.1-266.el7_8.1                                    
  selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1                           

Complete!
root@node03's password: 
Loaded plugins: fastestmirror, langpacks, product-id, search-disabled-repos,
              : subscription-manager

This system is not registered with an entitlement server. You can use subscription-manager to register.

Repository epel is listed more than once in the configuration
Repository epel-debuginfo is listed more than once in the configuration
Repository epel-source is listed more than once in the configuration
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.huaweicloud.com
 * updates: mirror.bit.edu.cn
Resolving Dependencies
--> Running transaction check
---> Package ceph.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: ceph-mon = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-osd = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mds = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-mgr = 2:14.2.12-0.el7 for package: 2:ceph-14.2.12-0.el7.x86_64
---> Package ceph-radosgw.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: librgw2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-selinux = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-base = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados2 = 2:14.2.12-0.el7 for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librados.so.2(LIBRADOS_14.2.0)(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: liboath.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: libceph-common.so.0()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Processing Dependency: librabbitmq.so.4()(64bit) for package: 2:ceph-radosgw-14.2.12-0.el7.x86_64
--> Running transaction check
---> Package ceph-base.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: libcephfs2 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: librbd1 = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: ceph-common = 2:14.2.12-0.el7 for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: libleveldb.so.1()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
--> Processing Dependency: liblttng-ust.so.0()(64bit) for package: 2:ceph-base-14.2.12-0.el7.x86_64
---> Package ceph-mds.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-mgr.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-pecan for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cherrypy for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-werkzeug for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
--> Processing Dependency: python-bcrypt for package: 2:ceph-mgr-14.2.12-0.el7.x86_64
---> Package ceph-mon.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-osd.x86_64 2:14.2.12-0.el7 will be installed
---> Package ceph-selinux.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: selinux-policy-base >= 3.13.1-252.el7_7.6 for package: 2:ceph-selinux-14.2.12-0.el7.x86_64
---> Package liboath.x86_64 0:2.6.2-1.el7 will be installed
---> Package librabbitmq.x86_64 0:0.8.0-2.el7 will be installed
---> Package librados2.x86_64 1:10.2.5-4.el7 will be updated
---> Package librados2.x86_64 2:14.2.12-0.el7 will be an update
---> Package librgw2.x86_64 2:14.2.12-0.el7 will be installed
--> Running transaction check
---> Package ceph-common.x86_64 2:14.2.12-0.el7 will be installed
--> Processing Dependency: python-rgw = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper1 = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-ceph-argparse = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rbd = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-cephfs = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-rados = 2:14.2.12-0.el7 for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: python-prettytable for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace-ctf.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: librdkafka.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libbabeltrace.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
--> Processing Dependency: libradosstriper.so.1()(64bit) for package: 2:ceph-common-14.2.12-0.el7.x86_64
---> Package leveldb.x86_64 0:1.12.0-11.el7 will be installed
---> Package libcephfs2.x86_64 2:14.2.12-0.el7 will be installed
---> Package librbd1.x86_64 1:10.2.5-4.el7 will be updated
---> Package librbd1.x86_64 2:14.2.12-0.el7 will be an update
---> Package lttng-ust.x86_64 0:2.4.1-4.el7 will be installed
--> Processing Dependency: liburcu-bp.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
--> Processing Dependency: liburcu-cds.so.1()(64bit) for package: lttng-ust-2.4.1-4.el7.x86_64
---> Package python-cherrypy.noarch 0:3.2.2-4.el7 will be installed
---> Package python-pecan.noarch 0:0.4.5-2.el7 will be installed
--> Processing Dependency: python-mako >= 0.4.0 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-simplegeneric >= 0.8 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webob >= 1.2 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-webtest >= 1.3.1 for package: python-pecan-0.4.5-2.el7.noarch
--> Processing Dependency: python-singledispatch for package: python-pecan-0.4.5-2.el7.noarch
---> Package python-werkzeug.noarch 0:0.9.1-2.el7 will be installed
---> Package python2-bcrypt.x86_64 0:3.1.6-2.el7 will be installed
--> Processing Dependency: python2-six for package: python2-bcrypt-3.1.6-2.el7.x86_64
---> Package selinux-policy-targeted.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1 will be an update
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Processing Dependency: selinux-policy = 3.13.1-266.el7_8.1 for package: selinux-policy-targeted-3.13.1-266.el7_8.1.noarch
--> Running transaction check
---> Package libbabeltrace.x86_64 0:1.2.4-3.el7 will be installed
---> Package libradosstriper1.x86_64 2:14.2.12-0.el7 will be installed
---> Package librdkafka.x86_64 0:0.11.5-1.el7 will be installed
---> Package python-ceph-argparse.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-cephfs.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-mako.noarch 0:0.8.1-2.el7 will be installed
--> Processing Dependency: python-markupsafe for package: python-mako-0.8.1-2.el7.noarch
--> Processing Dependency: python-beaker for package: python-mako-0.8.1-2.el7.noarch
---> Package python-prettytable.noarch 0:0.7.2-3.el7 will be installed
---> Package python-rados.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rbd.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-rgw.x86_64 2:14.2.12-0.el7 will be installed
---> Package python-simplegeneric.noarch 0:0.8-7.el7 will be installed
---> Package python-singledispatch.noarch 0:3.4.0.2-2.el7 will be installed
---> Package python-webob.noarch 0:1.2.3-7.el7 will be installed
---> Package python-webtest.noarch 0:1.3.4-6.el7 will be installed
---> Package python2-six.noarch 0:1.9.0-0.el7 will be installed
---> Package selinux-policy.noarch 0:3.13.1-229.el7 will be updated
---> Package selinux-policy.noarch 0:3.13.1-266.el7_8.1 will be an update
---> Package userspace-rcu.x86_64 0:0.7.16-1.el7 will be installed
--> Running transaction check
---> Package python-beaker.noarch 0:1.5.4-10.el7 will be installed
--> Processing Dependency: python-paste for package: python-beaker-1.5.4-10.el7.noarch
---> Package python-markupsafe.x86_64 0:0.11-10.el7 will be installed
--> Running transaction check
---> Package python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7 will be installed
--> Processing Dependency: python-tempita for package: python-paste-1.7.5.1-9.20111221hg1498.el7.noarch
--> Running transaction check
---> Package python-tempita.noarch 0:0.5.1-6.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                  Arch    Version                        Repository
                                                                           Size
================================================================================
Installing:
 ceph                     x86_64  2:14.2.12-0.el7                x86_64   3.0 k
 ceph-radosgw             x86_64  2:14.2.12-0.el7                x86_64   5.8 M
Installing for dependencies:
 ceph-base                x86_64  2:14.2.12-0.el7                x86_64   5.4 M
 ceph-common              x86_64  2:14.2.12-0.el7                x86_64    19 M
 ceph-mds                 x86_64  2:14.2.12-0.el7                x86_64   1.9 M
 ceph-mgr                 x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 ceph-mon                 x86_64  2:14.2.12-0.el7                x86_64   4.3 M
 ceph-osd                 x86_64  2:14.2.12-0.el7                x86_64    16 M
 ceph-selinux             x86_64  2:14.2.12-0.el7                x86_64    21 k
 leveldb                  x86_64  1.12.0-11.el7                  epel     161 k
 libbabeltrace            x86_64  1.2.4-3.el7                    epel     147 k
 libcephfs2               x86_64  2:14.2.12-0.el7                x86_64   500 k
 liboath                  x86_64  2.6.2-1.el7                    epel      51 k
 librabbitmq              x86_64  0.8.0-2.el7                    base      37 k
 libradosstriper1         x86_64  2:14.2.12-0.el7                x86_64   362 k
 librdkafka               x86_64  0.11.5-1.el7                   epel     368 k
 librgw2                  x86_64  2:14.2.12-0.el7                x86_64   5.1 M
 lttng-ust                x86_64  2.4.1-4.el7                    epel     176 k
 python-beaker            noarch  1.5.4-10.el7                   base      80 k
 python-ceph-argparse     x86_64  2:14.2.12-0.el7                x86_64    36 k
 python-cephfs            x86_64  2:14.2.12-0.el7                x86_64   112 k
 python-cherrypy          noarch  3.2.2-4.el7                    base     422 k
 python-mako              noarch  0.8.1-2.el7                    base     307 k
 python-markupsafe        x86_64  0.11-10.el7                    base      25 k
 python-paste             noarch  1.7.5.1-9.20111221hg1498.el7   base     866 k
 python-pecan             noarch  0.4.5-2.el7                    epel     255 k
 python-prettytable       noarch  0.7.2-3.el7                    base      37 k
 python-rados             x86_64  2:14.2.12-0.el7                x86_64   200 k
 python-rbd               x86_64  2:14.2.12-0.el7                x86_64   195 k
 python-rgw               x86_64  2:14.2.12-0.el7                x86_64    78 k
 python-simplegeneric     noarch  0.8-7.el7                      epel      12 k
 python-singledispatch    noarch  3.4.0.2-2.el7                  epel      18 k
 python-tempita           noarch  0.5.1-6.el7                    base      33 k
 python-webob             noarch  1.2.3-7.el7                    base     202 k
 python-webtest           noarch  1.3.4-6.el7                    base     102 k
 python-werkzeug          noarch  0.9.1-2.el7                    extras   562 k
 python2-bcrypt           x86_64  3.1.6-2.el7                    epel      39 k
 python2-six              noarch  1.9.0-0.el7                    epel     2.9 k
 userspace-rcu            x86_64  0.7.16-1.el7                   epel      73 k
Updating for dependencies:
 librados2                x86_64  2:14.2.12-0.el7                x86_64   3.4 M
 librbd1                  x86_64  2:14.2.12-0.el7                x86_64   1.7 M
 selinux-policy           noarch  3.13.1-266.el7_8.1             updates  497 k
 selinux-policy-targeted  noarch  3.13.1-266.el7_8.1             updates  7.0 M

Transaction Summary
================================================================================
Install  2 Packages (+37 Dependent packages)
Upgrade             (  4 Dependent packages)

Total download size: 77 M
Downloading packages:
No Presto metadata available for x86_64
No Presto metadata available for updates
--------------------------------------------------------------------------------
Total                                              3.0 MB/s |  77 MB  00:25     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : leveldb-1.12.0-11.el7.x86_64                                1/47 
  Installing : liboath-2.6.2-1.el7.x86_64                                  2/47 
  Installing : librabbitmq-0.8.0-2.el7.x86_64                              3/47 
  Installing : python-webob-1.2.3-7.el7.noarch                             4/47 
  Installing : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                 5/47 
  Installing : python-webtest-1.3.4-6.el7.noarch                           6/47 
  Installing : python-tempita-0.5.1-6.el7.noarch                           7/47 
  Installing : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch            8/47 
  Installing : python-beaker-1.5.4-10.el7.noarch                           9/47 
  Installing : python-cherrypy-3.2.2-4.el7.noarch                         10/47 
  Installing : python-werkzeug-0.9.1-2.el7.noarch                         11/47 
  Installing : librdkafka-0.11.5-1.el7.x86_64                             12/47 
  Installing : python-markupsafe-0.11-10.el7.x86_64                       13/47 
  Installing : python-mako-0.8.1-2.el7.noarch                             14/47 
  Installing : python-singledispatch-3.4.0.2-2.el7.noarch                 15/47 
  Updating   : selinux-policy-3.13.1-266.el7_8.1.noarch                   16/47 
  Updating   : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          17/47 
  Installing : libbabeltrace-1.2.4-3.el7.x86_64                           18/47 
  Installing : python-simplegeneric-0.8-7.el7.noarch                      19/47 
  Installing : python-pecan-0.4.5-2.el7.noarch                            20/47 
  Installing : userspace-rcu-0.7.16-1.el7.x86_64                          21/47 
  Installing : lttng-ust-2.4.1-4.el7.x86_64                               22/47 
  Updating   : 2:librados2-14.2.12-0.el7.x86_64                           23/47 
  Installing : 2:python-rados-14.2.12-0.el7.x86_64                        24/47 
  Installing : 2:librgw2-14.2.12-0.el7.x86_64                             25/47 
  Installing : 2:libcephfs2-14.2.12-0.el7.x86_64                          26/47 
  Updating   : 2:librbd1-14.2.12-0.el7.x86_64                             27/47 
  Installing : 2:python-rbd-14.2.12-0.el7.x86_64                          28/47 
  Installing : 2:python-cephfs-14.2.12-0.el7.x86_64                       29/47 
  Installing : 2:python-rgw-14.2.12-0.el7.x86_64                          30/47 
  Installing : 2:libradosstriper1-14.2.12-0.el7.x86_64                    31/47 
  Installing : python-prettytable-0.7.2-3.el7.noarch                      32/47 
  Installing : 2:ceph-common-14.2.12-0.el7.x86_64                         33/47 
  Installing : 2:ceph-base-14.2.12-0.el7.x86_64                           34/47 
  Installing : 2:ceph-selinux-14.2.12-0.el7.x86_64                        35/47 
  Installing : 2:ceph-mon-14.2.12-0.el7.x86_64                            36/47 
  Installing : 2:ceph-osd-14.2.12-0.el7.x86_64                            37/47 
  Installing : 2:ceph-mds-14.2.12-0.el7.x86_64                            38/47 
  Installing : python2-six-1.9.0-0.el7.noarch                             39/47 
  Installing : python2-bcrypt-3.1.6-2.el7.x86_64                          40/47 
  Installing : 2:ceph-mgr-14.2.12-0.el7.x86_64                            41/47 
  Installing : 2:ceph-14.2.12-0.el7.x86_64                                42/47 
  Installing : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        43/47 
  Cleanup    : selinux-policy-targeted-3.13.1-229.el7.noarch              44/47 
  Cleanup    : 1:librbd1-10.2.5-4.el7.x86_64                              45/47 
  Cleanup    : selinux-policy-3.13.1-229.el7.noarch                       46/47 
  Cleanup    : 1:librados2-10.2.5-4.el7.x86_64                            47/47 
  Verifying  : liboath-2.6.2-1.el7.x86_64                                  1/47 
  Verifying  : python2-six-1.9.0-0.el7.noarch                              2/47 
  Verifying  : 2:python-rbd-14.2.12-0.el7.x86_64                           3/47 
  Verifying  : python-prettytable-0.7.2-3.el7.noarch                       4/47 
  Verifying  : leveldb-1.12.0-11.el7.x86_64                                5/47 
  Verifying  : python2-bcrypt-3.1.6-2.el7.x86_64                           6/47 
  Verifying  : userspace-rcu-0.7.16-1.el7.x86_64                           7/47 
  Verifying  : python-simplegeneric-0.8-7.el7.noarch                       8/47 
  Verifying  : 2:python-cephfs-14.2.12-0.el7.x86_64                        9/47 
  Verifying  : libbabeltrace-1.2.4-3.el7.x86_64                           10/47 
  Verifying  : python-mako-0.8.1-2.el7.noarch                             11/47 
  Verifying  : 2:ceph-selinux-14.2.12-0.el7.x86_64                        12/47 
  Verifying  : selinux-policy-3.13.1-266.el7_8.1.noarch                   13/47 
  Verifying  : 2:librgw2-14.2.12-0.el7.x86_64                             14/47 
  Verifying  : 2:ceph-mon-14.2.12-0.el7.x86_64                            15/47 
  Verifying  : python-pecan-0.4.5-2.el7.noarch                            16/47 
  Verifying  : 2:ceph-radosgw-14.2.12-0.el7.x86_64                        17/47 
  Verifying  : python-singledispatch-3.4.0.2-2.el7.noarch                 18/47 
  Verifying  : python-markupsafe-0.11-10.el7.x86_64                       19/47 
  Verifying  : 2:ceph-common-14.2.12-0.el7.x86_64                         20/47 
  Verifying  : 2:libcephfs2-14.2.12-0.el7.x86_64                          21/47 
  Verifying  : librdkafka-0.11.5-1.el7.x86_64                             22/47 
  Verifying  : python-paste-1.7.5.1-9.20111221hg1498.el7.noarch           23/47 
  Verifying  : lttng-ust-2.4.1-4.el7.x86_64                               24/47 
  Verifying  : 2:ceph-osd-14.2.12-0.el7.x86_64                            25/47 
  Verifying  : python-werkzeug-0.9.1-2.el7.noarch                         26/47 
  Verifying  : 2:python-rados-14.2.12-0.el7.x86_64                        27/47 
  Verifying  : 2:libradosstriper1-14.2.12-0.el7.x86_64                    28/47 
  Verifying  : 2:ceph-base-14.2.12-0.el7.x86_64                           29/47 
  Verifying  : 2:ceph-mgr-14.2.12-0.el7.x86_64                            30/47 
  Verifying  : python-cherrypy-3.2.2-4.el7.noarch                         31/47 
  Verifying  : python-beaker-1.5.4-10.el7.noarch                          32/47 
  Verifying  : 2:python-rgw-14.2.12-0.el7.x86_64                          33/47 
  Verifying  : 2:librados2-14.2.12-0.el7.x86_64                           34/47 
  Verifying  : 2:ceph-mds-14.2.12-0.el7.x86_64                            35/47 
  Verifying  : selinux-policy-targeted-3.13.1-266.el7_8.1.noarch          36/47 
  Verifying  : python-tempita-0.5.1-6.el7.noarch                          37/47 
  Verifying  : python-webtest-1.3.4-6.el7.noarch                          38/47 
  Verifying  : 2:python-ceph-argparse-14.2.12-0.el7.x86_64                39/47 
  Verifying  : librabbitmq-0.8.0-2.el7.x86_64                             40/47 
  Verifying  : 2:librbd1-14.2.12-0.el7.x86_64                             41/47 
  Verifying  : python-webob-1.2.3-7.el7.noarch                            42/47 
  Verifying  : 2:ceph-14.2.12-0.el7.x86_64                                43/47 
  Verifying  : 1:librbd1-10.2.5-4.el7.x86_64                              44/47 
  Verifying  : selinux-policy-3.13.1-229.el7.noarch                       45/47 
  Verifying  : selinux-policy-targeted-3.13.1-229.el7.noarch              46/47 
  Verifying  : 1:librados2-10.2.5-4.el7.x86_64                            47/47 

Installed:
  ceph.x86_64 2:14.2.12-0.el7        ceph-radosgw.x86_64 2:14.2.12-0.el7       

Dependency Installed:
  ceph-base.x86_64 2:14.2.12-0.el7                                              
  ceph-common.x86_64 2:14.2.12-0.el7                                            
  ceph-mds.x86_64 2:14.2.12-0.el7                                               
  ceph-mgr.x86_64 2:14.2.12-0.el7                                               
  ceph-mon.x86_64 2:14.2.12-0.el7                                               
  ceph-osd.x86_64 2:14.2.12-0.el7                                               
  ceph-selinux.x86_64 2:14.2.12-0.el7                                           
  leveldb.x86_64 0:1.12.0-11.el7                                                
  libbabeltrace.x86_64 0:1.2.4-3.el7                                            
  libcephfs2.x86_64 2:14.2.12-0.el7                                             
  liboath.x86_64 0:2.6.2-1.el7                                                  
  librabbitmq.x86_64 0:0.8.0-2.el7                                              
  libradosstriper1.x86_64 2:14.2.12-0.el7                                       
  librdkafka.x86_64 0:0.11.5-1.el7                                              
  librgw2.x86_64 2:14.2.12-0.el7                                                
  lttng-ust.x86_64 0:2.4.1-4.el7                                                
  python-beaker.noarch 0:1.5.4-10.el7                                           
  python-ceph-argparse.x86_64 2:14.2.12-0.el7                                   
  python-cephfs.x86_64 2:14.2.12-0.el7                                          
  python-cherrypy.noarch 0:3.2.2-4.el7                                          
  python-mako.noarch 0:0.8.1-2.el7                                              
  python-markupsafe.x86_64 0:0.11-10.el7                                        
  python-paste.noarch 0:1.7.5.1-9.20111221hg1498.el7                            
  python-pecan.noarch 0:0.4.5-2.el7                                             
  python-prettytable.noarch 0:0.7.2-3.el7                                       
  python-rados.x86_64 2:14.2.12-0.el7                                           
  python-rbd.x86_64 2:14.2.12-0.el7                                             
  python-rgw.x86_64 2:14.2.12-0.el7                                             
  python-simplegeneric.noarch 0:0.8-7.el7                                       
  python-singledispatch.noarch 0:3.4.0.2-2.el7                                  
  python-tempita.noarch 0:0.5.1-6.el7                                           
  python-webob.noarch 0:1.2.3-7.el7                                             
  python-webtest.noarch 0:1.3.4-6.el7                                           
  python-werkzeug.noarch 0:0.9.1-2.el7                                          
  python2-bcrypt.x86_64 0:3.1.6-2.el7                                           
  python2-six.noarch 0:1.9.0-0.el7                                              
  userspace-rcu.x86_64 0:0.7.16-1.el7                                           

Dependency Updated:
  librados2.x86_64 2:14.2.12-0.el7                                              
  librbd1.x86_64 2:14.2.12-0.el7                                                
  selinux-policy.noarch 0:3.13.1-266.el7_8.1                                    
  selinux-policy-targeted.noarch 0:3.13.1-266.el7_8.1                           

Complete!
[root@node01 ~]# 

[root@node01 ~]# ceph -v
ceph version 14.2.12 (2f3caa3b8b3d5c5f2719a1e9d8e7deea5ae1a5c6) nautilus (stable)
[root@node01 ~]# 
[root@node01 ~]# rpm -qa|grep ceph
ceph-base-14.2.12-0.el7.x86_64
ceph-mgr-14.2.12-0.el7.x86_64
ceph-selinux-14.2.12-0.el7.x86_64
ceph-radosgw-14.2.12-0.el7.x86_64
ceph-mds-14.2.12-0.el7.x86_64
python-ceph-argparse-14.2.12-0.el7.x86_64
python-cephfs-14.2.12-0.el7.x86_64
ceph-osd-14.2.12-0.el7.x86_64
ceph-deploy-2.0.1-0.noarch
libcephfs2-14.2.12-0.el7.x86_64
ceph-14.2.12-0.el7.x86_64
ceph-common-14.2.12-0.el7.x86_64
ceph-mon-14.2.12-0.el7.x86_64
[root@node01 ~]# 

[root@node02 ~]# ceph -v
ceph version 14.2.12 (2f3caa3b8b3d5c5f2719a1e9d8e7deea5ae1a5c6) nautilus (stable)
[root@node02 ~]#
[root@node02 ~]# rpm -qa|grep ceph
ceph-mon-14.2.12-0.el7.x86_64
ceph-common-14.2.12-0.el7.x86_64
ceph-selinux-14.2.12-0.el7.x86_64
ceph-radosgw-14.2.12-0.el7.x86_64
python-ceph-argparse-14.2.12-0.el7.x86_64
python-cephfs-14.2.12-0.el7.x86_64
ceph-osd-14.2.12-0.el7.x86_64
libcephfs2-14.2.12-0.el7.x86_64
ceph-base-14.2.12-0.el7.x86_64
ceph-14.2.12-0.el7.x86_64
ceph-mgr-14.2.12-0.el7.x86_64
ceph-mds-14.2.12-0.el7.x86_64
[root@node02 ~]# 
[root@node03 ~]# ceph -v
ceph version 14.2.12 (2f3caa3b8b3d5c5f2719a1e9d8e7deea5ae1a5c6) nautilus (stable)
[root@node03 ~]#
[root@node03 ~]# rpm -qa|grep ceph
ceph-mon-14.2.12-0.el7.x86_64
ceph-common-14.2.12-0.el7.x86_64
ceph-selinux-14.2.12-0.el7.x86_64
ceph-radosgw-14.2.12-0.el7.x86_64
python-ceph-argparse-14.2.12-0.el7.x86_64
python-cephfs-14.2.12-0.el7.x86_64
ceph-osd-14.2.12-0.el7.x86_64
libcephfs2-14.2.12-0.el7.x86_64
ceph-base-14.2.12-0.el7.x86_64
ceph-14.2.12-0.el7.x86_64
ceph-mgr-14.2.12-0.el7.x86_64
ceph-mds-14.2.12-0.el7.x86_64
[root@node03 ~]# 

7.开始部署集群

[root@node01 ~]# mkdir -p /cluster
[root@node01 ~]# cd /cluster/
[root@node01 cluster]# ceph-deploy new node01 node02 node03

[root@node01 ~]# mkdir -p /cluster
[root@node01 ~]# 
[root@node01 ~]# cd /cluster/
[root@node01 ~]# 
[root@node01 cluster]# ceph-deploy new node01 node02 node03
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new node01 node02 node03
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  func                          : <function new at 0x7f2c2dfbd7d0>
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f2c2d736dd0>
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO ]  mon                           : ['node01', 'node02', 'node03']
[ceph_deploy.cli][INFO ]  public_network                : None
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  cluster_network               : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.cli][INFO ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[node01][INFO ] Running command: /usr/sbin/ip link show
[node01][INFO ] Running command: /usr/sbin/ip addr show
[node01][DEBUG ] IP addresses found: [u'192.168.1.181']
[ceph_deploy.new][DEBUG ] Resolving host node01
[ceph_deploy.new][DEBUG ] Monitor node01 at 192.168.1.181
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node02][DEBUG ] connected to host: node01 
[node02][INFO ] Running command: ssh -CT -o BatchMode=yes node02
[ceph_deploy.new][WARNIN] could not connect via SSH
[ceph_deploy.new][INFO ] creating a passwordless id_rsa.pub key file
[ceph_deploy.new][DEBUG ] connected to host: node01 
[ceph_deploy.new][INFO ] Running command: ssh-keygen -t rsa -N  -f /root/.ssh/id_rsa
[ceph_deploy.new][DEBUG ] Generating public/private rsa key pair.
[ceph_deploy.new][DEBUG ] Your identification has been saved in /root/.ssh/id_rsa.
[ceph_deploy.new][DEBUG ] Your public key has been saved in /root/.ssh/id_rsa.pub.
[ceph_deploy.new][DEBUG ] The key fingerprint is:
[ceph_deploy.new][DEBUG ] SHA256:q7J5xVb9xGPEw1SPQVlkndreYjqHZYUPSfpr6+h/ojU root@node01
[ceph_deploy.new][DEBUG ] The key's randomart image is:
[ceph_deploy.new][DEBUG ] +---[RSA 2048]----+
[ceph_deploy.new][DEBUG ] |             =+=B|
[ceph_deploy.new][DEBUG ] |              B=o|
[ceph_deploy.new][DEBUG ] |           . =++.|
[ceph_deploy.new][DEBUG ] |          . o.O..|
[ceph_deploy.new][DEBUG ] |       .S.   =.=.|
[ceph_deploy.new][DEBUG ] |        +.    B.o|
[ceph_deploy.new][DEBUG ] |       o.    *Eo |
[ceph_deploy.new][DEBUG ] |    ....    +o=..|
[ceph_deploy.new][DEBUG ] |    o+.    .+B++ |
[ceph_deploy.new][DEBUG ] +----[SHA256]-----+
[ceph_deploy.new][INFO ] will connect again with password prompt
root@node02's password: 
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][WARNIN] .ssh/authorized_keys does not exist, will skip adding keys
root@node02's password: 
root@node02's password: 
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[node02][INFO ] Running command: /usr/sbin/ip link show
[node02][INFO ] Running command: /usr/sbin/ip addr show
[node02][DEBUG ] IP addresses found: [u'172.17.93.1', u'192.168.1.182']
[ceph_deploy.new][DEBUG ] Resolving host node02
[ceph_deploy.new][DEBUG ] Monitor node02 at 192.168.1.182
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node03][DEBUG ] connected to host: node01 
[node03][INFO ] Running command: ssh -CT -o BatchMode=yes node03
[ceph_deploy.new][WARNIN] could not connect via SSH
[ceph_deploy.new][INFO ] will connect again with password prompt
root@node03's password: 
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][WARNIN] .ssh/authorized_keys does not exist, will skip adding keys
root@node03's password: 
root@node03's password: 
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[node03][INFO ] Running command: /usr/sbin/ip link show
[node03][INFO ] Running command: /usr/sbin/ip addr show
[node03][DEBUG ] IP addresses found: [u'192.168.1.183', u'172.17.12.1']
[ceph_deploy.new][DEBUG ] Resolving host node03
[ceph_deploy.new][DEBUG ] Monitor node03 at 192.168.1.183
[ceph_deploy.new][DEBUG ] Monitor initial members are ['node01', 'node02', 'node03']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.1.181', '192.168.1.182', '192.168.1.183']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[root@node01 cluster]# 

8.Ceph分发密钥

[root@node01 cluster]# ssh-keygen
[root@node01 cluster]# ssh-copy-id node01
[root@node01 cluster]# ssh-copy-id node02
[root@node01 cluster]# ssh-copy-id node03

[root@node01 cluster]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
/root/.ssh/id_rsa already exists.
Overwrite (y/n)? y
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:b+18tga3Z9OxJdIbwJRo4yi9qUu8NB4ZBcoMKwfDtyA root@node01
The key's randomart image is:
+---[RSA 2048]----+
|+ .   .    . .   |
|E= * . .  + o    |
|o = =  ..+ +     |
| o .  ..o . o    |
|      ..So   o   |
|     . oo. .o =..|
|      B.  o .+ ==|
|     +.+ . o  *o+|
|      +.    o+.+.|
+----[SHA256]-----+
[root@node01 cluster]# 
[root@node01 cluster]# ssh-copy-id node01
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node01's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node01'"
and check to make sure that only the key(s) you wanted were added.

[root@node01 cluster]# 
[root@node01 cluster]# ssh-copy-id node02
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node02's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node02'"
and check to make sure that only the key(s) you wanted were added.

[root@node01 cluster]# 
[root@node01 cluster]# ssh-copy-id node03
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node03's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node03'"
and check to make sure that only the key(s) you wanted were added.

[root@node01 cluster]# 

9.初始化Monitor并分发集群配置

[root@node01 cluster]# ceph-deploy mon create-initial

[root@node01 cluster]# ceph-deploy admin node01 node02 node03

[root@node01 cluster]# ceph -s

[root@node01 cluster]# ceph-deploy mon create-initial  
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f96f1d85950>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mon at 0x7f96f2611d70>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  keyrings                      : None
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts node01 node02 node03
[ceph_deploy.mon][DEBUG ] detecting platform for host node01 ...
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.6.1810 Core
[node01][DEBUG ] determining if provided host has same hostname in remote
[node01][DEBUG ] get remote short hostname
[node01][DEBUG ] deploying mon to node01
[node01][DEBUG ] get remote short hostname
[node01][DEBUG ] remote hostname: node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][DEBUG ] create the mon path if it does not exist
[node01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node01/done
[node01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node01/done
[node01][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node01.mon.keyring
[node01][DEBUG ] create the monitor keyring file
[node01][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i node01 --keyring /var/lib/ceph/tmp/ceph-node01.mon.keyring --setuser 167 --setgroup 167
[node01][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node01.mon.keyring
[node01][DEBUG ] create a done file to avoid re-doing the mon deployment
[node01][DEBUG ] create the init path if it does not exist
[node01][INFO  ] Running command: systemctl enable ceph.target
[node01][INFO  ] Running command: systemctl enable ceph-mon@node01
[node01][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node01.service to /usr/lib/systemd/system/ceph-mon@.service.
[node01][INFO  ] Running command: systemctl start ceph-mon@node01
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[node01][DEBUG ] ********************************************************************************
[node01][DEBUG ] status for monitor: mon.node01
[node01][DEBUG ] {
[node01][DEBUG ]   "election_epoch": 0, 
[node01][DEBUG ]   "extra_probe_peers": [
[node01][DEBUG ]     {
[node01][DEBUG ]       "addrvec": [
[node01][DEBUG ]         {
[node01][DEBUG ]           "addr": "192.168.1.182:3300", 
[node01][DEBUG ]           "nonce": 0, 
[node01][DEBUG ]           "type": "v2"
[node01][DEBUG ]         }, 
[node01][DEBUG ]         {
[node01][DEBUG ]           "addr": "192.168.1.182:6789", 
[node01][DEBUG ]           "nonce": 0, 
[node01][DEBUG ]           "type": "v1"
[node01][DEBUG ]         }
[node01][DEBUG ]       ]
[node01][DEBUG ]     }, 
[node01][DEBUG ]     {
[node01][DEBUG ]       "addrvec": [
[node01][DEBUG ]         {
[node01][DEBUG ]           "addr": "192.168.1.183:3300", 
[node01][DEBUG ]           "nonce": 0, 
[node01][DEBUG ]           "type": "v2"
[node01][DEBUG ]         }, 
[node01][DEBUG ]         {
[node01][DEBUG ]           "addr": "192.168.1.183:6789", 
[node01][DEBUG ]           "nonce": 0, 
[node01][DEBUG ]           "type": "v1"
[node01][DEBUG ]         }
[node01][DEBUG ]       ]
[node01][DEBUG ]     }
[node01][DEBUG ]   ], 
[node01][DEBUG ]   "feature_map": {
[node01][DEBUG ]     "mon": [
[node01][DEBUG ]       {
[node01][DEBUG ]         "features": "0x3ffddff8ffecffff", 
[node01][DEBUG ]         "num": 1, 
[node01][DEBUG ]         "release": "luminous"
[node01][DEBUG ]       }
[node01][DEBUG ]     ]
[node01][DEBUG ]   }, 
[node01][DEBUG ]   "features": {
[node01][DEBUG ]     "quorum_con": "0", 
[node01][DEBUG ]     "quorum_mon": [], 
[node01][DEBUG ]     "required_con": "0", 
[node01][DEBUG ]     "required_mon": []
[node01][DEBUG ]   }, 
[node01][DEBUG ]   "monmap": {
[node01][DEBUG ]     "created": "2020-10-31 23:08:25.227025", 
[node01][DEBUG ]     "epoch": 0, 
[node01][DEBUG ]     "features": {
[node01][DEBUG ]       "optional": [], 
[node01][DEBUG ]       "persistent": []
[node01][DEBUG ]     }, 
[node01][DEBUG ]     "fsid": "c90f3c76-20f0-4091-a66b-74ec0e6f4ec8", 
[node01][DEBUG ]     "min_mon_release": 0, 
[node01][DEBUG ]     "min_mon_release_name": "unknown", 
[node01][DEBUG ]     "modified": "2020-10-31 23:08:25.227025", 
[node01][DEBUG ]     "mons": [
[node01][DEBUG ]       {
[node01][DEBUG ]         "addr": "192.168.1.181:6789/0", 
[node01][DEBUG ]         "name": "node01", 
[node01][DEBUG ]         "public_addr": "192.168.1.181:6789/0", 
[node01][DEBUG ]         "public_addrs": {
[node01][DEBUG ]           "addrvec": [
[node01][DEBUG ]             {
[node01][DEBUG ]               "addr": "192.168.1.181:3300", 
[node01][DEBUG ]               "nonce": 0, 
[node01][DEBUG ]               "type": "v2"
[node01][DEBUG ]             }, 
[node01][DEBUG ]             {
[node01][DEBUG ]               "addr": "192.168.1.181:6789", 
[node01][DEBUG ]               "nonce": 0, 
[node01][DEBUG ]               "type": "v1"
[node01][DEBUG ]             }
[node01][DEBUG ]           ]
[node01][DEBUG ]         }, 
[node01][DEBUG ]         "rank": 0
[node01][DEBUG ]       }, 
[node01][DEBUG ]       {
[node01][DEBUG ]         "addr": "0.0.0.0:0/1", 
[node01][DEBUG ]         "name": "node02", 
[node01][DEBUG ]         "public_addr": "0.0.0.0:0/1", 
[node01][DEBUG ]         "public_addrs": {
[node01][DEBUG ]           "addrvec": [
[node01][DEBUG ]             {
[node01][DEBUG ]               "addr": "0.0.0.0:0", 
[node01][DEBUG ]               "nonce": 1, 
[node01][DEBUG ]               "type": "v1"
[node01][DEBUG ]             }
[node01][DEBUG ]           ]
[node01][DEBUG ]         }, 
[node01][DEBUG ]         "rank": 1
[node01][DEBUG ]       }, 
[node01][DEBUG ]       {
[node01][DEBUG ]         "addr": "0.0.0.0:0/2", 
[node01][DEBUG ]         "name": "node03", 
[node01][DEBUG ]         "public_addr": "0.0.0.0:0/2", 
[node01][DEBUG ]         "public_addrs": {
[node01][DEBUG ]           "addrvec": [
[node01][DEBUG ]             {
[node01][DEBUG ]               "addr": "0.0.0.0:0", 
[node01][DEBUG ]               "nonce": 2, 
[node01][DEBUG ]               "type": "v1"
[node01][DEBUG ]             }
[node01][DEBUG ]           ]
[node01][DEBUG ]         }, 
[node01][DEBUG ]         "rank": 2
[node01][DEBUG ]       }
[node01][DEBUG ]     ]
[node01][DEBUG ]   }, 
[node01][DEBUG ]   "name": "node01", 
[node01][DEBUG ]   "outside_quorum": [
[node01][DEBUG ]     "node01"
[node01][DEBUG ]   ], 
[node01][DEBUG ]   "quorum": [], 
[node01][DEBUG ]   "rank": 0, 
[node01][DEBUG ]   "state": "probing", 
[node01][DEBUG ]   "sync_provider": []
[node01][DEBUG ] }
[node01][DEBUG ] ********************************************************************************
[node01][INFO  ] monitor: mon.node01 is running
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node02 ...
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.6.1810 Core
[node02][DEBUG ] determining if provided host has same hostname in remote
[node02][DEBUG ] get remote short hostname
[node02][DEBUG ] deploying mon to node02
[node02][DEBUG ] get remote short hostname
[node02][DEBUG ] remote hostname: node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][DEBUG ] create the mon path if it does not exist
[node02][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node02/done
[node02][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node02/done
[node02][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node02.mon.keyring
[node02][DEBUG ] create the monitor keyring file
[node02][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i node02 --keyring /var/lib/ceph/tmp/ceph-node02.mon.keyring --setuser 167 --setgroup 167
[node02][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node02.mon.keyring
[node02][DEBUG ] create a done file to avoid re-doing the mon deployment
[node02][DEBUG ] create the init path if it does not exist
[node02][INFO  ] Running command: systemctl enable ceph.target
[node02][INFO  ] Running command: systemctl enable ceph-mon@node02
[node02][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node02.service to /usr/lib/systemd/system/ceph-mon@.service.
[node02][INFO  ] Running command: systemctl start ceph-mon@node02
[node02][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node02.asok mon_status
[node02][DEBUG ] ********************************************************************************
[node02][DEBUG ] status for monitor: mon.node02
[node02][DEBUG ] {
[node02][DEBUG ]   "election_epoch": 0, 
[node02][DEBUG ]   "extra_probe_peers": [
[node02][DEBUG ]     {
[node02][DEBUG ]       "addrvec": [
[node02][DEBUG ]         {
[node02][DEBUG ]           "addr": "192.168.1.181:3300", 
[node02][DEBUG ]           "nonce": 0, 
[node02][DEBUG ]           "type": "v2"
[node02][DEBUG ]         }, 
[node02][DEBUG ]         {
[node02][DEBUG ]           "addr": "192.168.1.181:6789", 
[node02][DEBUG ]           "nonce": 0, 
[node02][DEBUG ]           "type": "v1"
[node02][DEBUG ]         }
[node02][DEBUG ]       ]
[node02][DEBUG ]     }, 
[node02][DEBUG ]     {
[node02][DEBUG ]       "addrvec": [
[node02][DEBUG ]         {
[node02][DEBUG ]           "addr": "192.168.1.183:3300", 
[node02][DEBUG ]           "nonce": 0, 
[node02][DEBUG ]           "type": "v2"
[node02][DEBUG ]         }, 
[node02][DEBUG ]         {
[node02][DEBUG ]           "addr": "192.168.1.183:6789", 
[node02][DEBUG ]           "nonce": 0, 
[node02][DEBUG ]           "type": "v1"
[node02][DEBUG ]         }
[node02][DEBUG ]       ]
[node02][DEBUG ]     }
[node02][DEBUG ]   ], 
[node02][DEBUG ]   "feature_map": {
[node02][DEBUG ]     "mon": [
[node02][DEBUG ]       {
[node02][DEBUG ]         "features": "0x3ffddff8ffecffff", 
[node02][DEBUG ]         "num": 1, 
[node02][DEBUG ]         "release": "luminous"
[node02][DEBUG ]       }
[node02][DEBUG ]     ]
[node02][DEBUG ]   }, 
[node02][DEBUG ]   "features": {
[node02][DEBUG ]     "quorum_con": "0", 
[node02][DEBUG ]     "quorum_mon": [], 
[node02][DEBUG ]     "required_con": "0", 
[node02][DEBUG ]     "required_mon": []
[node02][DEBUG ]   }, 
[node02][DEBUG ]   "monmap": {
[node02][DEBUG ]     "created": "2020-10-31 23:08:28.614711", 
[node02][DEBUG ]     "epoch": 0, 
[node02][DEBUG ]     "features": {
[node02][DEBUG ]       "optional": [], 
[node02][DEBUG ]       "persistent": []
[node02][DEBUG ]     }, 
[node02][DEBUG ]     "fsid": "c90f3c76-20f0-4091-a66b-74ec0e6f4ec8", 
[node02][DEBUG ]     "min_mon_release": 0, 
[node02][DEBUG ]     "min_mon_release_name": "unknown", 
[node02][DEBUG ]     "modified": "2020-10-31 23:08:28.614711", 
[node02][DEBUG ]     "mons": [
[node02][DEBUG ]       {
[node02][DEBUG ]         "addr": "192.168.1.182:6789/0", 
[node02][DEBUG ]         "name": "node02", 
[node02][DEBUG ]         "public_addr": "192.168.1.182:6789/0", 
[node02][DEBUG ]         "public_addrs": {
[node02][DEBUG ]           "addrvec": [
[node02][DEBUG ]             {
[node02][DEBUG ]               "addr": "192.168.1.182:3300", 
[node02][DEBUG ]               "nonce": 0, 
[node02][DEBUG ]               "type": "v2"
[node02][DEBUG ]             }, 
[node02][DEBUG ]             {
[node02][DEBUG ]               "addr": "192.168.1.182:6789", 
[node02][DEBUG ]               "nonce": 0, 
[node02][DEBUG ]               "type": "v1"
[node02][DEBUG ]             }
[node02][DEBUG ]           ]
[node02][DEBUG ]         }, 
[node02][DEBUG ]         "rank": 0
[node02][DEBUG ]       }, 
[node02][DEBUG ]       {
[node02][DEBUG ]         "addr": "0.0.0.0:0/1", 
[node02][DEBUG ]         "name": "node01", 
[node02][DEBUG ]         "public_addr": "0.0.0.0:0/1", 
[node02][DEBUG ]         "public_addrs": {
[node02][DEBUG ]           "addrvec": [
[node02][DEBUG ]             {
[node02][DEBUG ]               "addr": "0.0.0.0:0", 
[node02][DEBUG ]               "nonce": 1, 
[node02][DEBUG ]               "type": "v1"
[node02][DEBUG ]             }
[node02][DEBUG ]           ]
[node02][DEBUG ]         }, 
[node02][DEBUG ]         "rank": 1
[node02][DEBUG ]       }, 
[node02][DEBUG ]       {
[node02][DEBUG ]         "addr": "0.0.0.0:0/2", 
[node02][DEBUG ]         "name": "node03", 
[node02][DEBUG ]         "public_addr": "0.0.0.0:0/2", 
[node02][DEBUG ]         "public_addrs": {
[node02][DEBUG ]           "addrvec": [
[node02][DEBUG ]             {
[node02][DEBUG ]               "addr": "0.0.0.0:0", 
[node02][DEBUG ]               "nonce": 2, 
[node02][DEBUG ]               "type": "v1"
[node02][DEBUG ]             }
[node02][DEBUG ]           ]
[node02][DEBUG ]         }, 
[node02][DEBUG ]         "rank": 2
[node02][DEBUG ]       }
[node02][DEBUG ]     ]
[node02][DEBUG ]   }, 
[node02][DEBUG ]   "name": "node02", 
[node02][DEBUG ]   "outside_quorum": [
[node02][DEBUG ]     "node02"
[node02][DEBUG ]   ], 
[node02][DEBUG ]   "quorum": [], 
[node02][DEBUG ]   "rank": 0, 
[node02][DEBUG ]   "state": "probing", 
[node02][DEBUG ]   "sync_provider": []
[node02][DEBUG ] }
[node02][DEBUG ] ********************************************************************************
[node02][INFO  ] monitor: mon.node02 is running
[node02][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node02.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node03 ...
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.6.1810 Core
[node03][DEBUG ] determining if provided host has same hostname in remote
[node03][DEBUG ] get remote short hostname
[node03][DEBUG ] deploying mon to node03
[node03][DEBUG ] get remote short hostname
[node03][DEBUG ] remote hostname: node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][DEBUG ] create the mon path if it does not exist
[node03][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node03/done
[node03][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node03/done
[node03][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node03.mon.keyring
[node03][DEBUG ] create the monitor keyring file
[node03][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i node03 --keyring /var/lib/ceph/tmp/ceph-node03.mon.keyring --setuser 167 --setgroup 167
[node03][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node03.mon.keyring
[node03][DEBUG ] create a done file to avoid re-doing the mon deployment
[node03][DEBUG ] create the init path if it does not exist
[node03][INFO  ] Running command: systemctl enable ceph.target
[node03][INFO  ] Running command: systemctl enable ceph-mon@node03
[node03][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node03.service to /usr/lib/systemd/system/ceph-mon@.service.
[node03][INFO  ] Running command: systemctl start ceph-mon@node03
[node03][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node03.asok mon_status
[node03][DEBUG ] ********************************************************************************
[node03][DEBUG ] status for monitor: mon.node03
[node03][DEBUG ] {
[node03][DEBUG ]   "election_epoch": 1, 
[node03][DEBUG ]   "extra_probe_peers": [
[node03][DEBUG ]     {
[node03][DEBUG ]       "addrvec": [
[node03][DEBUG ]         {
[node03][DEBUG ]           "addr": "192.168.1.181:3300", 
[node03][DEBUG ]           "nonce": 0, 
[node03][DEBUG ]           "type": "v2"
[node03][DEBUG ]         }, 
[node03][DEBUG ]         {
[node03][DEBUG ]           "addr": "192.168.1.181:6789", 
[node03][DEBUG ]           "nonce": 0, 
[node03][DEBUG ]           "type": "v1"
[node03][DEBUG ]         }
[node03][DEBUG ]       ]
[node03][DEBUG ]     }, 
[node03][DEBUG ]     {
[node03][DEBUG ]       "addrvec": [
[node03][DEBUG ]         {
[node03][DEBUG ]           "addr": "192.168.1.182:3300", 
[node03][DEBUG ]           "nonce": 0, 
[node03][DEBUG ]           "type": "v2"
[node03][DEBUG ]         }, 
[node03][DEBUG ]         {
[node03][DEBUG ]           "addr": "192.168.1.182:6789", 
[node03][DEBUG ]           "nonce": 0, 
[node03][DEBUG ]           "type": "v1"
[node03][DEBUG ]         }
[node03][DEBUG ]       ]
[node03][DEBUG ]     }
[node03][DEBUG ]   ], 
[node03][DEBUG ]   "feature_map": {
[node03][DEBUG ]     "mon": [
[node03][DEBUG ]       {
[node03][DEBUG ]         "features": "0x3ffddff8ffecffff", 
[node03][DEBUG ]         "num": 1, 
[node03][DEBUG ]         "release": "luminous"
[node03][DEBUG ]       }
[node03][DEBUG ]     ]
[node03][DEBUG ]   }, 
[node03][DEBUG ]   "features": {
[node03][DEBUG ]     "quorum_con": "0", 
[node03][DEBUG ]     "quorum_mon": [], 
[node03][DEBUG ]     "required_con": "0", 
[node03][DEBUG ]     "required_mon": []
[node03][DEBUG ]   }, 
[node03][DEBUG ]   "monmap": {
[node03][DEBUG ]     "created": "2020-10-31 23:08:31.811660", 
[node03][DEBUG ]     "epoch": 0, 
[node03][DEBUG ]     "features": {
[node03][DEBUG ]       "optional": [], 
[node03][DEBUG ]       "persistent": []
[node03][DEBUG ]     }, 
[node03][DEBUG ]     "fsid": "c90f3c76-20f0-4091-a66b-74ec0e6f4ec8", 
[node03][DEBUG ]     "min_mon_release": 0, 
[node03][DEBUG ]     "min_mon_release_name": "unknown", 
[node03][DEBUG ]     "modified": "2020-10-31 23:08:31.811660", 
[node03][DEBUG ]     "mons": [
[node03][DEBUG ]       {
[node03][DEBUG ]         "addr": "192.168.1.182:6789/0", 
[node03][DEBUG ]         "name": "node02", 
[node03][DEBUG ]         "public_addr": "192.168.1.182:6789/0", 
[node03][DEBUG ]         "public_addrs": {
[node03][DEBUG ]           "addrvec": [
[node03][DEBUG ]             {
[node03][DEBUG ]               "addr": "192.168.1.182:3300", 
[node03][DEBUG ]               "nonce": 0, 
[node03][DEBUG ]               "type": "v2"
[node03][DEBUG ]             }, 
[node03][DEBUG ]             {
[node03][DEBUG ]               "addr": "192.168.1.182:6789", 
[node03][DEBUG ]               "nonce": 0, 
[node03][DEBUG ]               "type": "v1"
[node03][DEBUG ]             }
[node03][DEBUG ]           ]
[node03][DEBUG ]         }, 
[node03][DEBUG ]         "rank": 0
[node03][DEBUG ]       }, 
[node03][DEBUG ]       {
[node03][DEBUG ]         "addr": "192.168.1.183:6789/0", 
[node03][DEBUG ]         "name": "node03", 
[node03][DEBUG ]         "public_addr": "192.168.1.183:6789/0", 
[node03][DEBUG ]         "public_addrs": {
[node03][DEBUG ]           "addrvec": [
[node03][DEBUG ]             {
[node03][DEBUG ]               "addr": "192.168.1.183:3300", 
[node03][DEBUG ]               "nonce": 0, 
[node03][DEBUG ]               "type": "v2"
[node03][DEBUG ]             }, 
[node03][DEBUG ]             {
[node03][DEBUG ]               "addr": "192.168.1.183:6789", 
[node03][DEBUG ]               "nonce": 0, 
[node03][DEBUG ]               "type": "v1"
[node03][DEBUG ]             }
[node03][DEBUG ]           ]
[node03][DEBUG ]         }, 
[node03][DEBUG ]         "rank": 1
[node03][DEBUG ]       }, 
[node03][DEBUG ]       {
[node03][DEBUG ]         "addr": "0.0.0.0:0/1", 
[node03][DEBUG ]         "name": "node01", 
[node03][DEBUG ]         "public_addr": "0.0.0.0:0/1", 
[node03][DEBUG ]         "public_addrs": {
[node03][DEBUG ]           "addrvec": [
[node03][DEBUG ]             {
[node03][DEBUG ]               "addr": "0.0.0.0:0", 
[node03][DEBUG ]               "nonce": 1, 
[node03][DEBUG ]               "type": "v1"
[node03][DEBUG ]             }
[node03][DEBUG ]           ]
[node03][DEBUG ]         }, 
[node03][DEBUG ]         "rank": 2
[node03][DEBUG ]       }
[node03][DEBUG ]     ]
[node03][DEBUG ]   }, 
[node03][DEBUG ]   "name": "node03", 
[node03][DEBUG ]   "outside_quorum": [], 
[node03][DEBUG ]   "quorum": [], 
[node03][DEBUG ]   "rank": 1, 
[node03][DEBUG ]   "state": "electing", 
[node03][DEBUG ]   "sync_provider": []
[node03][DEBUG ] }
[node03][DEBUG ] ********************************************************************************
[node03][INFO  ] monitor: mon.node03 is running
[node03][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node03.asok mon_status
[ceph_deploy.mon][INFO  ] processing monitor mon.node01
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[ceph_deploy.mon][WARNIN] mon.node01 monitor is not yet in quorum, tries left: 5
[ceph_deploy.mon][WARNIN] waiting 5 seconds before retrying
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[ceph_deploy.mon][WARNIN] mon.node01 monitor is not yet in quorum, tries left: 4
[ceph_deploy.mon][WARNIN] waiting 10 seconds before retrying
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[ceph_deploy.mon][WARNIN] mon.node01 monitor is not yet in quorum, tries left: 3
[ceph_deploy.mon][WARNIN] waiting 10 seconds before retrying
[node01][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node01.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node01 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.node02
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node02.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node02 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.node03
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node03.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node03 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO  ] Running gatherkeys...
[ceph_deploy.gatherkeys][INFO  ] Storing keys in temp directory /tmp/tmp05thDR
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] get remote short hostname
[node01][DEBUG ] fetch remote file
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.node01.asok mon_status
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node01/keyring auth get client.admin
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node01/keyring auth get client.bootstrap-mds
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node01/keyring auth get client.bootstrap-mgr
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node01/keyring auth get client.bootstrap-osd
[node01][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node01/keyring auth get client.bootstrap-rgw
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mgr.keyring
[ceph_deploy.gatherkeys][INFO  ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO  ] Destroy temp directory /tmp/tmp05thDR
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy admin node01 node02 node03
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy admin node01 node02 node03
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f73aa83ef38>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  client                        : ['node01', 'node02', 'node03']
[ceph_deploy.cli][INFO  ]  func                          : <function admin at 0x7f73ab2ddb90>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node01
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node02
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node03
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 3m)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 ~]# 

10.创建OSD数据存储设备（在每个节点的三块数据盘 /dev/sdb、/dev/sdc、/dev/sdd 上各创建一个 BlueStore OSD，三节点共 9 个 OSD）

[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdb

[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdc

[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdd

[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdb

[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdc

[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdd

[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdb

[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdc

[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdd

[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node01 --data /dev/sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0a97390f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node01
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f0a97c102a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdb
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][WARNIN] osd keyring does not exist yet, creating one
[node01][DEBUG ] create a keyring file
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 8ffe5922-9235-4740-a711-f79594c517e8
[node01][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b /dev/sdb
[node01][WARNIN]  stdout: Physical volume "/dev/sdb" successfully created.
[node01][WARNIN]  stdout: Volume group "ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b" successfully created
[node01][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-8ffe5922-9235-4740-a711-f79594c517e8 ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b
[node01][WARNIN]  stdout: Logical volume "osd-block-8ffe5922-9235-4740-a711-f79594c517e8" created.
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b/osd-block-8ffe5922-9235-4740-a711-f79594c517e8
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
[node01][WARNIN] Running command: /usr/bin/ln -s /dev/ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b/osd-block-8ffe5922-9235-4740-a711-f79594c517e8 /var/lib/ceph/osd/ceph-0/block
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
[node01][WARNIN]  stderr: 2020-11-12 15:34:25.726 7fb6f6b94700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node01][WARNIN] 2020-11-12 15:34:25.726 7fb6f6b94700 -1 AuthRegistry(0x7fb6f0065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node01][WARNIN]  stderr: got monmap epoch 2
[node01][WARNIN] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQCA5axf7j2qNxAACwvKjsqkOOyxCL1gbDAI7Q==
[node01][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-0/keyring
[node01][WARNIN] added entity osd.0 auth(key=AQCA5axf7j2qNxAACwvKjsqkOOyxCL1gbDAI7Q==)
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
[node01][WARNIN] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 8ffe5922-9235-4740-a711-f79594c517e8 --setuser ceph --setgroup ceph
[node01][WARNIN]  stderr: 2020-11-12 15:34:26.334 7f7872156a80 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
[node01][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdb
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[node01][WARNIN] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b/osd-block-8ffe5922-9235-4740-a711-f79594c517e8 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
[node01][WARNIN] Running command: /usr/bin/ln -snf /dev/ceph-15aeb760-677d-47b4-8f8a-cce9bb39d84b/osd-block-8ffe5922-9235-4740-a711-f79594c517e8 /var/lib/ceph/osd/ceph-0/block
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[node01][WARNIN] Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-8ffe5922-9235-4740-a711-f79594c517e8
[node01][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-8ffe5922-9235-4740-a711-f79594c517e8.service to /usr/lib/systemd/system/ceph-volume@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
[node01][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl start ceph-osd@0
[node01][WARNIN] --> ceph-volume lvm activate successful for osd ID: 0
[node01][WARNIN] --> ceph-volume lvm create successful for: /dev/sdb
[node01][INFO  ] checking OSD status...
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node01 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 37m)
    mgr: no daemons active
    osd: 1 osds: 1 up (since 46s), 1 in (since 46s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdc
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node01 --data /dev/sdc
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0f90954f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node01
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f0f911d42a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 2ab77d24-d999-4fd3-871c-3003b65cc718
[node01][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-777aa516-f115-405a-b9c0-dfdc40adf94f /dev/sdc
[node01][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[node01][WARNIN]  stdout: Volume group "ceph-777aa516-f115-405a-b9c0-dfdc40adf94f" successfully created
[node01][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718 ceph-777aa516-f115-405a-b9c0-dfdc40adf94f
[node01][WARNIN]  stdout: Logical volume "osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718" created.
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-777aa516-f115-405a-b9c0-dfdc40adf94f/osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
[node01][WARNIN] Running command: /usr/bin/ln -s /dev/ceph-777aa516-f115-405a-b9c0-dfdc40adf94f/osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718 /var/lib/ceph/osd/ceph-1/block
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
[node01][WARNIN]  stderr: 2020-11-12 15:37:24.629 7fd2330b7700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node01][WARNIN] 2020-11-12 15:37:24.629 7fd2330b7700 -1 AuthRegistry(0x7fd22c065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node01][WARNIN]  stderr: got monmap epoch 2
[node01][WARNIN] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQAz5qxfWfy+NhAAxIbsmTCoACrMwJPXdSLRjQ==
[node01][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-1/keyring
[node01][WARNIN] added entity osd.1 auth(key=AQAz5qxfWfy+NhAAxIbsmTCoACrMwJPXdSLRjQ==)
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
[node01][WARNIN] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 2ab77d24-d999-4fd3-871c-3003b65cc718 --setuser ceph --setgroup ceph
[node01][WARNIN]  stderr: 2020-11-12 15:37:25.208 7f5d9b58ca80 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid
[node01][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
[node01][WARNIN] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-777aa516-f115-405a-b9c0-dfdc40adf94f/osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718 --path /var/lib/ceph/osd/ceph-1 --no-mon-config
[node01][WARNIN] Running command: /usr/bin/ln -snf /dev/ceph-777aa516-f115-405a-b9c0-dfdc40adf94f/osd-block-2ab77d24-d999-4fd3-871c-3003b65cc718 /var/lib/ceph/osd/ceph-1/block
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
[node01][WARNIN] Running command: /usr/bin/systemctl enable ceph-volume@lvm-1-2ab77d24-d999-4fd3-871c-3003b65cc718
[node01][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-2ab77d24-d999-4fd3-871c-3003b65cc718.service to /usr/lib/systemd/system/ceph-volume@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl enable --runtime ceph-osd@1
[node01][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@1.service to /usr/lib/systemd/system/ceph-osd@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl start ceph-osd@1
[node01][WARNIN] --> ceph-volume lvm activate successful for osd ID: 1
[node01][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[node01][INFO  ] checking OSD status...
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node01 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 39m)
    mgr: no daemons active
    osd: 2 osds: 2 up (since 7s), 2 in (since 7s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]#
#将node01节点的第三块数据盘/dev/sdd创建为OSD；
[root@node01 cluster]# ceph-deploy osd create node01 --data /dev/sdd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node01 --data /dev/sdd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fdcb9828f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node01
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fdcba0a82a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[node01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 5f153e7a-85a7-446d-a7ec-ef2efec2b3a3
[node01][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b /dev/sdd
[node01][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[node01][WARNIN]  stdout: Volume group "ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b" successfully created
[node01][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3 ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b
[node01][WARNIN]  stdout: Logical volume "osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3" created.
[node01][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[node01][WARNIN] Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b/osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-4
[node01][WARNIN] Running command: /usr/bin/ln -s /dev/ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b/osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3 /var/lib/ceph/osd/ceph-2/block
[node01][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
[node01][WARNIN]  stderr: 2020-11-12 15:38:18.115 7fe2492a1700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node01][WARNIN] 2020-11-12 15:38:18.115 7fe2492a1700 -1 AuthRegistry(0x7fe244065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node01][WARNIN]  stderr: got monmap epoch 2
[node01][WARNIN] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQBp5qxfS5w4FhAAvNbazD8pqbHG3ytO5fQDiw==
[node01][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-2/keyring
[node01][WARNIN] added entity osd.2 auth(key=AQBp5qxfS5w4FhAAvNbazD8pqbHG3ytO5fQDiw==)
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
[node01][WARNIN] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 5f153e7a-85a7-446d-a7ec-ef2efec2b3a3 --setuser ceph --setgroup ceph
[node01][WARNIN]  stderr: 2020-11-12 15:38:19.727 7ffa6d39fa80 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
[node01][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
[node01][WARNIN] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b/osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
[node01][WARNIN] Running command: /usr/bin/ln -snf /dev/ceph-ddaf1857-a390-4ffa-a21b-bfba478e6c0b/osd-block-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3 /var/lib/ceph/osd/ceph-2/block
[node01][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-4
[node01][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
[node01][WARNIN] Running command: /usr/bin/systemctl enable ceph-volume@lvm-2-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3
[node01][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-5f153e7a-85a7-446d-a7ec-ef2efec2b3a3.service to /usr/lib/systemd/system/ceph-volume@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl enable --runtime ceph-osd@2
[node01][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@2.service to /usr/lib/systemd/system/ceph-osd@.service.
[node01][WARNIN] Running command: /usr/bin/systemctl start ceph-osd@2
[node01][WARNIN] --> ceph-volume lvm activate successful for osd ID: 2
[node01][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[node01][INFO  ] checking OSD status...
[node01][DEBUG ] find the location of an executable
[node01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node01 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 40m)
    mgr: no daemons active
    osd: 3 osds: 3 up (since 7s), 3 in (since 7s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
#将node02节点的数据盘/dev/sdb创建为OSD；
[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdb 
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node02 --data /dev/sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff2fe446f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7ff2fecc62a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdb
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][WARNIN] osd keyring does not exist yet, creating one
[node02][DEBUG ] create a keyring file
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 381b6597-3d41-41bd-8304-cc485c348b52
[node02][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb /dev/sdb
[node02][WARNIN]  stdout: Physical volume "/dev/sdb" successfully created.
[node02][WARNIN]  stdout: Volume group "ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb" successfully created
[node02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-381b6597-3d41-41bd-8304-cc485c348b52 ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb
[node02][WARNIN]  stdout: Logical volume "osd-block-381b6597-3d41-41bd-8304-cc485c348b52" created.
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb/osd-block-381b6597-3d41-41bd-8304-cc485c348b52
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-2
[node02][WARNIN] Running command: /bin/ln -s /dev/ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb/osd-block-381b6597-3d41-41bd-8304-cc485c348b52 /var/lib/ceph/osd/ceph-3/block
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
[node02][WARNIN]  stderr: 2020-11-12 15:39:13.792 7f902f899700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node02][WARNIN] 2020-11-12 15:39:13.792 7f902f899700 -1 AuthRegistry(0x7f9028065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node02][WARNIN]  stderr: got monmap epoch 2
[node02][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQCg5qxfQuI4KRAAAL0R+JKWEhrhYXoUEIsIew==
[node02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-3/keyring
[node02][WARNIN] added entity osd.3 auth(key=AQCg5qxfQuI4KRAAAL0R+JKWEhrhYXoUEIsIew==)
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
[node02][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 381b6597-3d41-41bd-8304-cc485c348b52 --setuser ceph --setgroup ceph
[node02][WARNIN]  stderr: 2020-11-12 15:39:14.975 7fb218602a80 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid
[node02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdb
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[node02][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb/osd-block-381b6597-3d41-41bd-8304-cc485c348b52 --path /var/lib/ceph/osd/ceph-3 --no-mon-config
[node02][WARNIN] Running command: /bin/ln -snf /dev/ceph-91956a7c-5375-4dea-81c0-5cb9e2f514fb/osd-block-381b6597-3d41-41bd-8304-cc485c348b52 /var/lib/ceph/osd/ceph-3/block
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-2
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[node02][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-3-381b6597-3d41-41bd-8304-cc485c348b52
[node02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-381b6597-3d41-41bd-8304-cc485c348b52.service to /usr/lib/systemd/system/ceph-volume@.service.
[node02][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@3
[node02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service to /usr/lib/systemd/system/ceph-osd@.service.
[node02][WARNIN] Running command: /bin/systemctl start ceph-osd@3
[node02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 3
[node02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdb
[node02][INFO  ] checking OSD status...
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node02 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 41m)
    mgr: no daemons active
    osd: 4 osds: 4 up (since 8s), 4 in (since 8s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
#将node02节点的数据盘/dev/sdc创建为OSD；
[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdc
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node02 --data /dev/sdc
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff0f7ec6f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7ff0f87462a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new b8e2c607-aed1-45a0-a93e-8c6a2ec8e203
[node02][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0 /dev/sdc
[node02][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[node02][WARNIN]  stdout: Volume group "ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0" successfully created
[node02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203 ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0
[node02][WARNIN]  stdout: Logical volume "osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203" created.
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0/osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-3
[node02][WARNIN] Running command: /bin/ln -s /dev/ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0/osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203 /var/lib/ceph/osd/ceph-4/block
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap
[node02][WARNIN]  stderr: 2020-11-12 15:39:57.360 7fb72422a700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node02][WARNIN] 2020-11-12 15:39:57.360 7fb72422a700 -1 AuthRegistry(0x7fb71c065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node02][WARNIN]  stderr: got monmap epoch 2
[node02][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQDM5qxf6FjpGBAArvaVdfRxM7iMqMB9OI10Dw==
[node02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-4/keyring
[node02][WARNIN] added entity osd.4 auth(key=AQDM5qxf6FjpGBAArvaVdfRxM7iMqMB9OI10Dw==)
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/
[node02][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid b8e2c607-aed1-45a0-a93e-8c6a2ec8e203 --setuser ceph --setgroup ceph
[node02][WARNIN]  stderr: 2020-11-12 15:39:57.764 7f3698667a80 -1 bluestore(/var/lib/ceph/osd/ceph-4/) _read_fsid unparsable uuid
[node02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[node02][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0/osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203 --path /var/lib/ceph/osd/ceph-4 --no-mon-config
[node02][WARNIN] Running command: /bin/ln -snf /dev/ceph-9aeac6d5-82eb-425c-9b8f-44d075b34bc0/osd-block-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203 /var/lib/ceph/osd/ceph-4/block
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-3
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[node02][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-4-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203
[node02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-b8e2c607-aed1-45a0-a93e-8c6a2ec8e203.service to /usr/lib/systemd/system/ceph-volume@.service.
[node02][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@4
[node02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service to /usr/lib/systemd/system/ceph-osd@.service.
[node02][WARNIN] Running command: /bin/systemctl start ceph-osd@4
[node02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 4
[node02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[node02][INFO  ] checking OSD status...
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node02 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 42m)
    mgr: no daemons active
    osd: 5 osds: 5 up (since 5s), 5 in (since 5s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
#将node02节点的数据盘/dev/sdd创建为OSD；
[root@node01 cluster]# ceph-deploy osd create node02 --data /dev/sdd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node02 --data /dev/sdd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fc1aef61f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fc1af7e12a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[node02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new c8049801-9529-406e-a8bf-35664a49d762
[node02][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40 /dev/sdd
[node02][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[node02][WARNIN]  stdout: Volume group "ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40" successfully created
[node02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-c8049801-9529-406e-a8bf-35664a49d762 ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40
[node02][WARNIN]  stdout: Logical volume "osd-block-c8049801-9529-406e-a8bf-35664a49d762" created.
[node02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node02][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-5
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40/osd-block-c8049801-9529-406e-a8bf-35664a49d762
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-4
[node02][WARNIN] Running command: /bin/ln -s /dev/ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40/osd-block-c8049801-9529-406e-a8bf-35664a49d762 /var/lib/ceph/osd/ceph-5/block
[node02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-5/activate.monmap
[node02][WARNIN]  stderr: 2020-11-12 15:40:35.710 7f34b5f69700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node02][WARNIN] 2020-11-12 15:40:35.710 7f34b5f69700 -1 AuthRegistry(0x7f34b0065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node02][WARNIN]  stderr: got monmap epoch 2
[node02][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-5/keyring --create-keyring --name osd.5 --add-key AQDy5qxfT8bsKhAA7mF48viHKgy1j21rpIuJhQ==
[node02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-5/keyring
[node02][WARNIN] added entity osd.5 auth(key=AQDy5qxfT8bsKhAA7mF48viHKgy1j21rpIuJhQ==)
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/keyring
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/
[node02][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 5 --monmap /var/lib/ceph/osd/ceph-5/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-5/ --osd-uuid c8049801-9529-406e-a8bf-35664a49d762 --setuser ceph --setgroup ceph
[node02][WARNIN]  stderr: 2020-11-12 15:40:37.286 7f5f98187a80 -1 bluestore(/var/lib/ceph/osd/ceph-5/) _read_fsid unparsable uuid
[node02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[node02][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40/osd-block-c8049801-9529-406e-a8bf-35664a49d762 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
[node02][WARNIN] Running command: /bin/ln -snf /dev/ceph-db29c417-b9ce-4b5c-bfe3-ee5cab54da40/osd-block-c8049801-9529-406e-a8bf-35664a49d762 /var/lib/ceph/osd/ceph-5/block
[node02][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-4
[node02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[node02][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-5-c8049801-9529-406e-a8bf-35664a49d762
[node02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-5-c8049801-9529-406e-a8bf-35664a49d762.service to /usr/lib/systemd/system/ceph-volume@.service.
[node02][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@5
[node02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@5.service to /usr/lib/systemd/system/ceph-osd@.service.
[node02][WARNIN] Running command: /bin/systemctl start ceph-osd@5
[node02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 5
[node02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[node02][INFO  ] checking OSD status...
[node02][DEBUG ] find the location of an executable
[node02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node02 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 43m)
    mgr: no daemons active
    osd: 6 osds: 6 up (since 6s), 6 in (since 6s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdb 
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node03 --data /dev/sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fb54f86ef80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node03
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fb5500ee2a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdb
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][WARNIN] osd keyring does not exist yet, creating one
[node03][DEBUG ] create a keyring file
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4ce83292-8b3f-45c5-bb41-ddb3ef789a5c
[node03][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-bd30de39-0962-4012-b54e-c353bf3f8330 /dev/sdb
[node03][WARNIN]  stdout: Physical volume "/dev/sdb" successfully created.
[node03][WARNIN]  stdout: Volume group "ceph-bd30de39-0962-4012-b54e-c353bf3f8330" successfully created
[node03][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c ceph-bd30de39-0962-4012-b54e-c353bf3f8330
[node03][WARNIN]  stdout: Logical volume "osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c" created.
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-bd30de39-0962-4012-b54e-c353bf3f8330/osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-2
[node03][WARNIN] Running command: /bin/ln -s /dev/ceph-bd30de39-0962-4012-b54e-c353bf3f8330/osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c /var/lib/ceph/osd/ceph-6/block
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap
[node03][WARNIN]  stderr: 2020-11-12 15:41:19.876 7f2a5ab24700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node03][WARNIN] 2020-11-12 15:41:19.876 7f2a5ab24700 -1 AuthRegistry(0x7f2a54065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node03][WARNIN]  stderr: got monmap epoch 2
[node03][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-6/keyring --create-keyring --name osd.6 --add-key AQAf56xfAwafBhAANdJIGFiX7+tbdQttMqLobg==
[node03][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-6/keyring
[node03][WARNIN] added entity osd.6 auth(key=AQAf56xfAwafBhAANdJIGFiX7+tbdQttMqLobg==)
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/
[node03][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid 4ce83292-8b3f-45c5-bb41-ddb3ef789a5c --setuser ceph --setgroup ceph
[node03][WARNIN]  stderr: 2020-11-12 15:41:20.528 7f608fe1fa80 -1 bluestore(/var/lib/ceph/osd/ceph-6/) _read_fsid unparsable uuid
[node03][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdb
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
[node03][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bd30de39-0962-4012-b54e-c353bf3f8330/osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c --path /var/lib/ceph/osd/ceph-6 --no-mon-config
[node03][WARNIN] Running command: /bin/ln -snf /dev/ceph-bd30de39-0962-4012-b54e-c353bf3f8330/osd-block-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c /var/lib/ceph/osd/ceph-6/block
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-2
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
[node03][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-6-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c
[node03][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-6-4ce83292-8b3f-45c5-bb41-ddb3ef789a5c.service to /usr/lib/systemd/system/ceph-volume@.service.
[node03][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@6
[node03][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@6.service to /usr/lib/systemd/system/ceph-osd@.service.
[node03][WARNIN] Running command: /bin/systemctl start ceph-osd@6
[node03][WARNIN] --> ceph-volume lvm activate successful for osd ID: 6
[node03][WARNIN] --> ceph-volume lvm create successful for: /dev/sdb
[node03][INFO  ] checking OSD status...
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node03 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 43m)
    mgr: no daemons active
    osd: 7 osds: 7 up (since 6s), 7 in (since 6s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdc
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node03 --data /dev/sdc
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fec94eaef80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node03
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fec9572e2a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 714d5d28-07a9-41f4-b507-cc68d5515c9f
[node03][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc /dev/sdc
[node03][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[node03][WARNIN]  stdout: Volume group "ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc" successfully created
[node03][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc
[node03][WARNIN]  stdout: Logical volume "osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f" created.
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc/osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-3
[node03][WARNIN] Running command: /bin/ln -s /dev/ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc/osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f /var/lib/ceph/osd/ceph-7/block
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap
[node03][WARNIN]  stderr: 2020-11-12 15:42:04.999 7f0c45466700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node03][WARNIN] 2020-11-12 15:42:04.999 7f0c45466700 -1 AuthRegistry(0x7f0c40065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node03][WARNIN]  stderr: got monmap epoch 2
[node03][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-7/keyring --create-keyring --name osd.7 --add-key AQBM56xfhXaRChAALfvZ8pWEKy/qEI/z2D1AnQ==
[node03][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-7/keyring
[node03][WARNIN] added entity osd.7 auth(key=AQBM56xfhXaRChAALfvZ8pWEKy/qEI/z2D1AnQ==)
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/
[node03][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 714d5d28-07a9-41f4-b507-cc68d5515c9f --setuser ceph --setgroup ceph
[node03][WARNIN]  stderr: 2020-11-12 15:42:05.342 7f96acbaca80 -1 bluestore(/var/lib/ceph/osd/ceph-7/) _read_fsid unparsable uuid
[node03][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7
[node03][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc/osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f --path /var/lib/ceph/osd/ceph-7 --no-mon-config
[node03][WARNIN] Running command: /bin/ln -snf /dev/ceph-4e04f1f1-9018-40d4-ac70-e82c2d5aa9cc/osd-block-714d5d28-07a9-41f4-b507-cc68d5515c9f /var/lib/ceph/osd/ceph-7/block
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-3
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7
[node03][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-7-714d5d28-07a9-41f4-b507-cc68d5515c9f
[node03][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-7-714d5d28-07a9-41f4-b507-cc68d5515c9f.service to /usr/lib/systemd/system/ceph-volume@.service.
[node03][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@7
[node03][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@7.service to /usr/lib/systemd/system/ceph-osd@.service.
[node03][WARNIN] Running command: /bin/systemctl start ceph-osd@7
[node03][WARNIN] --> ceph-volume lvm activate successful for osd ID: 7
[node03][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[node03][INFO  ] checking OSD status...
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node03 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 44m)
    mgr: no daemons active
    osd: 8 osds: 8 up (since 5s), 8 in (since 5s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy osd create node03 --data /dev/sdd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create node03 --data /dev/sdd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0618744f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : node03
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f0618fc42a8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[node03][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 5913569b-0783-4a5a-9157-bbf936bf9a88
[node03][WARNIN] Running command: /usr/sbin/vgcreate --force --yes ceph-b661755e-39af-425f-83c6-a908fb0ad2f9 /dev/sdd
[node03][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[node03][WARNIN]  stdout: Volume group "ceph-b661755e-39af-425f-83c6-a908fb0ad2f9" successfully created
[node03][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 2559 -n osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88 ceph-b661755e-39af-425f-83c6-a908fb0ad2f9
[node03][WARNIN]  stdout: Logical volume "osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88" created.
[node03][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[node03][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-8
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-b661755e-39af-425f-83c6-a908fb0ad2f9/osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-4
[node03][WARNIN] Running command: /bin/ln -s /dev/ceph-b661755e-39af-425f-83c6-a908fb0ad2f9/osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88 /var/lib/ceph/osd/ceph-8/block
[node03][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-8/activate.monmap
[node03][WARNIN]  stderr: 2020-11-12 15:42:29.428 7f8722fc3700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[node03][WARNIN] 2020-11-12 15:42:29.428 7f8722fc3700 -1 AuthRegistry(0x7f871c065a98) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[node03][WARNIN]  stderr: got monmap epoch 2
[node03][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-8/keyring --create-keyring --name osd.8 --add-key AQBk56xfIFKiIhAA3KEZc2bNrEHUH8cdb/0ZcQ==
[node03][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-8/keyring
[node03][WARNIN] added entity osd.8 auth(key=AQBk56xfIFKiIhAA3KEZc2bNrEHUH8cdb/0ZcQ==)
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-8/keyring
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-8/
[node03][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 8 --monmap /var/lib/ceph/osd/ceph-8/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-8/ --osd-uuid 5913569b-0783-4a5a-9157-bbf936bf9a88 --setuser ceph --setgroup ceph
[node03][WARNIN]  stderr: 2020-11-12 15:42:30.155 7fdc18603a80 -1 bluestore(/var/lib/ceph/osd/ceph-8/) _read_fsid unparsable uuid
[node03][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-8
[node03][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b661755e-39af-425f-83c6-a908fb0ad2f9/osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88 --path /var/lib/ceph/osd/ceph-8 --no-mon-config
[node03][WARNIN] Running command: /bin/ln -snf /dev/ceph-b661755e-39af-425f-83c6-a908fb0ad2f9/osd-block-5913569b-0783-4a5a-9157-bbf936bf9a88 /var/lib/ceph/osd/ceph-8/block
[node03][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-8/block
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-4
[node03][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-8
[node03][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-8-5913569b-0783-4a5a-9157-bbf936bf9a88
[node03][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-8-5913569b-0783-4a5a-9157-bbf936bf9a88.service to /usr/lib/systemd/system/ceph-volume@.service.
[node03][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@8
[node03][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@8.service to /usr/lib/systemd/system/ceph-osd@.service.
[node03][WARNIN] Running command: /bin/systemctl start ceph-osd@8
[node03][WARNIN] --> ceph-volume lvm activate successful for osd ID: 8
[node03][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[node03][INFO  ] checking OSD status...
[node03][DEBUG ] find the location of an executable
[node03][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host node03 is now ready for osd use.
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 44m)
    mgr: no daemons active
    osd: 9 osds: 9 up (since 5s), 9 in (since 5s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@node01 cluster]# 

11.创建mgr和rgw服务

[root@node01 cluster]# ceph-deploy mgr create node01 node02 node03

[root@node01 cluster]# ceph-deploy rgw create node01 node02 node03

#验证端口监听：6789为mon监听端口，7480为rgw（RADOS Gateway）默认监听端口；
[root@node01 cluster]# netstat -tnlp|grep -aiE "6789|7480"

[root@node02 ~]# netstat -tnlp|grep -aiE "6789|7480"

[root@node03 ~]# netstat -tnlp|grep -aiE "6789|7480"

[root@node01 cluster]# ceph-deploy mgr create node01 node02 node03
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mgr create node01 node02 node03
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  mgr                           : [('node01', 'node01'), ('node02', 'node02'), ('node03', 'node03')]
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f167b3d5200>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mgr at 0x7f167bc42aa0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts node01:node01 node02:node02 node03:node03
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][WARNIN] mgr keyring does not exist yet, creating one
[node01][DEBUG ] create a keyring file
[node01][DEBUG ] create path recursively if it doesn't exist
[node01][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.node01 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-node01/keyring
[node01][INFO  ] Running command: systemctl enable ceph-mgr@node01
[node01][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@node01.service to /usr/lib/systemd/system/ceph-mgr@.service.
[node01][INFO  ] Running command: systemctl start ceph-mgr@node01
[node01][INFO  ] Running command: systemctl enable ceph.target
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][WARNIN] mgr keyring does not exist yet, creating one
[node02][DEBUG ] create a keyring file
[node02][DEBUG ] create path recursively if it doesn't exist
[node02][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.node02 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-node02/keyring
[node02][INFO  ] Running command: systemctl enable ceph-mgr@node02
[node02][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@node02.service to /usr/lib/systemd/system/ceph-mgr@.service.
[node02][INFO  ] Running command: systemctl start ceph-mgr@node02
[node02][INFO  ] Running command: systemctl enable ceph.target
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][WARNIN] mgr keyring does not exist yet, creating one
[node03][DEBUG ] create a keyring file
[node03][DEBUG ] create path recursively if it doesn't exist
[node03][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.node03 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-node03/keyring
[node03][INFO  ] Running command: systemctl enable ceph-mgr@node03
[node03][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@node03.service to /usr/lib/systemd/system/ceph-mgr@.service.
[node03][INFO  ] Running command: systemctl start ceph-mgr@node03
[node03][INFO  ] Running command: systemctl enable ceph.target
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 47m)
    mgr: node01(active, since 9s), standbys: node03, node02
    osd: 9 osds: 9 up (since 3m), 9 in (since 3m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.0 GiB used, 81 GiB / 90 GiB avail
    pgs:     
 
[root@node01 cluster]# 
[root@node01 cluster]# ceph-deploy rgw create node01 node02 node03
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy rgw create node01 node02 node03
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  rgw                           : [('node01', 'rgw.node01'), ('node02', 'rgw.node02'), ('node03', 'rgw.node03')]
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fa344a4a5f0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function rgw at 0x7fa3455169b0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts node01:rgw.node01 node02:rgw.node02 node03:rgw.node03
[node01][DEBUG ] connected to host: node01 
[node01][DEBUG ] detect platform information from remote host
[node01][DEBUG ] detect machine type
[ceph_deploy.rgw][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.rgw][DEBUG ] remote host will use systemd
[ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to node01
[node01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node01][WARNIN] rgw keyring does not exist yet, creating one
[node01][DEBUG ] create a keyring file
[node01][DEBUG ] create path recursively if it doesn't exist
[node01][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.node01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.node01/keyring
[node01][INFO  ] Running command: systemctl enable ceph-radosgw@rgw.node01
[node01][WARNIN] Created symlink from /etc/systemd/system/ceph-radosgw.target.wants/ceph-radosgw@rgw.node01.service to /usr/lib/systemd/system/ceph-radosgw@.service.
[node01][INFO  ] Running command: systemctl start ceph-radosgw@rgw.node01
[node01][INFO  ] Running command: systemctl enable ceph.target
[ceph_deploy.rgw][INFO  ] The Ceph Object Gateway (RGW) is now running on host node01 and default port 7480
[node02][DEBUG ] connected to host: node02 
[node02][DEBUG ] detect platform information from remote host
[node02][DEBUG ] detect machine type
[ceph_deploy.rgw][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.rgw][DEBUG ] remote host will use systemd
[ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to node02
[node02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node02][WARNIN] rgw keyring does not exist yet, creating one
[node02][DEBUG ] create a keyring file
[node02][DEBUG ] create path recursively if it doesn't exist
[node02][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.node02 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.node02/keyring
[node02][INFO  ] Running command: systemctl enable ceph-radosgw@rgw.node02
[node02][WARNIN] Created symlink from /etc/systemd/system/ceph-radosgw.target.wants/ceph-radosgw@rgw.node02.service to /usr/lib/systemd/system/ceph-radosgw@.service.
[node02][INFO  ] Running command: systemctl start ceph-radosgw@rgw.node02
[node02][INFO  ] Running command: systemctl enable ceph.target
[ceph_deploy.rgw][INFO  ] The Ceph Object Gateway (RGW) is now running on host node02 and default port 7480
[node03][DEBUG ] connected to host: node03 
[node03][DEBUG ] detect platform information from remote host
[node03][DEBUG ] detect machine type
[ceph_deploy.rgw][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.rgw][DEBUG ] remote host will use systemd
[ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to node03
[node03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node03][WARNIN] rgw keyring does not exist yet, creating one
[node03][DEBUG ] create a keyring file
[node03][DEBUG ] create path recursively if it doesn't exist
[node03][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.node03 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.node03/keyring
[node03][INFO  ] Running command: systemctl enable ceph-radosgw@rgw.node03
[node03][WARNIN] Created symlink from /etc/systemd/system/ceph-radosgw.target.wants/ceph-radosgw@rgw.node03.service to /usr/lib/systemd/system/ceph-radosgw@.service.
[node03][INFO  ] Running command: systemctl start ceph-radosgw@rgw.node03
[node03][INFO  ] Running command: systemctl enable ceph.target
[ceph_deploy.rgw][INFO  ] The Ceph Object Gateway (RGW) is now running on host node03 and default port 7480
[root@node01 cluster]# 
[root@node01 cluster]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 49m)
    mgr: node01(active, since 110s), standbys: node03, node02
    osd: 9 osds: 9 up (since 4m), 9 in (since 4m)
    rgw: 3 daemons active (node01, node02, node03)
 
  task status:
 
  data:
    pools:   4 pools, 128 pgs
    objects: 187 objects, 1.2 KiB
    usage:   9.1 GiB used, 81 GiB / 90 GiB avail
    pgs:     128 active+clean
 
  io:
    client:   73 KiB/s rd, 0 B/s wr, 80 op/s rd, 53 op/s wr
 
[root@node01 cluster]# 
[root@node01 cluster]# netstat -tnlp|grep -aiE 6789
tcp        0      0 192.168.1.181:6789      0.0.0.0:*               LISTEN      9811/ceph-mon       
[root@node01 cluster]# 
[root@node02 ~]# netstat -tnlp|grep -aiE 6789
tcp        0      0 192.168.1.182:6789      0.0.0.0:*               LISTEN      9596/ceph-mon       
[root@node02 ~]# 
[root@node03 ~]# netstat -tnlp|grep -aiE 6789
tcp        0      0 192.168.1.183:6789      0.0.0.0:*               LISTEN      9600/ceph-mon       
[root@node03 ~]# 

12.在node01查看相关的服务和端口,如图所示,Ceph分布式节点部署成功

[root@node01 ~]# ps -ef|grep ceph

[root@node01 ~]# netstat -tnlp

[root@node01 ~]# ceph -s

[root@node01 ~]# ps -ef|grep ceph
root        9206       1  0 14:57 ?        00:00:00 /usr/bin/python2.7 /usr/bin/ceph-crash
ceph        9811       1  0 14:57 ?        00:00:13 /usr/bin/ceph-mon -f --cluster ceph --id node01 --setuser ceph --setgroup ceph
ceph       11775       1  0 15:34 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph
ceph       12731       1  0 15:37 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 1 --setuser ceph --setgroup ceph
ceph       13232       1  0 15:38 ?        00:00:07 /usr/bin/ceph-osd -f --cluster ceph --id 2 --setuser ceph --setgroup ceph
ceph       13762       1  1 15:45 ?        00:00:17 /usr/bin/ceph-mgr -f --cluster ceph --id node01 --setuser ceph --setgroup ceph
ceph       14008       1  0 15:46 ?        00:00:02 /usr/bin/radosgw -f --cluster ceph --name client.rgw.node01 --setuser ceph --setgroup ceph
root       15116   10741  0 16:11 pts/0    00:00:00 grep --color=auto ceph
[root@node01 ~]# 
[root@node01 ~]# netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:6823            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 0.0.0.0:6824            0.0.0.0:*               LISTEN      13762/ceph-mgr      
tcp        0      0 0.0.0.0:6825            0.0.0.0:*               LISTEN      13762/ceph-mgr      
tcp        0      0 192.168.1.181:2379      0.0.0.0:*               LISTEN      9817/etcd           
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      9817/etcd           
tcp        0      0 192.168.1.181:2380      0.0.0.0:*               LISTEN      9817/etcd           
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:6800            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      10120/X             
tcp        0      0 0.0.0.0:6801            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6802            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6803            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6804            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6805            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:6806            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      9812/sshd           
tcp        0      0 0.0.0.0:6807            0.0.0.0:*               LISTEN      11775/ceph-osd      
tcp        0      0 127.0.0.1:631           0.0.0.0:*               LISTEN      9815/cupsd          
tcp        0      0 0.0.0.0:7480            0.0.0.0:*               LISTEN      14008/radosgw       
tcp        0      0 0.0.0.0:6808            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6809            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      10059/master        
tcp        0      0 0.0.0.0:6810            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6811            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6812            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6813            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6814            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6815            0.0.0.0:*               LISTEN      12731/ceph-osd      
tcp        0      0 0.0.0.0:6816            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 0.0.0.0:6817            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 0.0.0.0:6818            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 0.0.0.0:6819            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 0.0.0.0:6820            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 192.168.1.181:3300      0.0.0.0:*               LISTEN      9811/ceph-mon       
tcp        0      0 0.0.0.0:6821            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp        0      0 192.168.1.181:6789      0.0.0.0:*               LISTEN      9811/ceph-mon       
tcp        0      0 0.0.0.0:6822            0.0.0.0:*               LISTEN      13232/ceph-osd      
tcp6       0      0 :::6443                 :::*                    LISTEN      10467/kube-apiserve 
tcp6       0      0 :::10251                :::*                    LISTEN      9186/kube-scheduler 
tcp6       0      0 :::10252                :::*                    LISTEN      9250/kube-controlle 
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::8080                 :::*                    LISTEN      10467/kube-apiserve 
tcp6       0      0 :::6000                 :::*                    LISTEN      10120/X             
tcp6       0      0 :::22                   :::*                    LISTEN      9812/sshd           
tcp6       0      0 ::1:631                 :::*                    LISTEN      9815/cupsd          
tcp6       0      0 :::7480                 :::*                    LISTEN      14008/radosgw       
tcp6       0      0 ::1:25                  :::*                    LISTEN      10059/master        
[root@node01 ~]# 
[root@node01 ~]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 73m)
    mgr: node01(active, since 25m), standbys: node03, node02
    osd: 9 osds: 9 up (since 28m), 9 in (since 28m)
    rgw: 3 daemons active (node01, node02, node03)
 
  task status:
 
  data:
    pools:   4 pools, 128 pgs
    objects: 187 objects, 1.2 KiB
    usage:   9.1 GiB used, 81 GiB / 90 GiB avail
    pgs:     128 active+clean
 
[root@node01 ~]# 

image-20201112155925939

image-20201112160130282

13.在node02查看相关的服务和端口,如图所示,Ceph分布式节点部署成功

[root@node02 ~]# ps -ef|grep ceph

[root@node02 ~]# netstat -tnlp

[root@node02 ~]# ceph -s

[root@node02 ~]# ps -ef|grep ceph
root        9142       1  0 14:57 ?        00:00:00 /usr/bin/python2.7 /usr/bin/ceph-crash
ceph        9596       1  0 14:57 ?        00:00:09 /usr/bin/ceph-mon -f --cluster ceph --id node02 --setuser ceph --setgroup ceph
ceph       48042       1  0 15:39 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 3 --setuser ceph --setgroup ceph
ceph       48997       1  0 15:39 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 4 --setuser ceph --setgroup ceph
ceph       49927       1  0 15:40 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 5 --setuser ceph --setgroup ceph
ceph       53291       1  0 15:45 ?        00:00:04 /usr/bin/ceph-mgr -f --cluster ceph --id node02 --setuser ceph --setgroup ceph
ceph       54454       1  0 15:47 ?        00:00:03 /usr/bin/radosgw -f --cluster ceph --name client.rgw.node02 --setuser ceph --setgroup ceph
root       70838   21557  0 16:11 pts/0    00:00:00 grep --color=auto ceph
[root@node02 ~]# 
[root@node02 ~]# netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:6823            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      10235/kubelet       
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      9605/kube-proxy     
tcp        0      0 192.168.1.182:2379      0.0.0.0:*               LISTEN      9603/etcd           
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      9603/etcd           
tcp        0      0 192.168.1.182:2380      0.0.0.0:*               LISTEN      9603/etcd           
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:6800            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      9918/X              
tcp        0      0 0.0.0.0:6801            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6802            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6803            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6804            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6805            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:6806            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      9594/sshd           
tcp        0      0 0.0.0.0:6807            0.0.0.0:*               LISTEN      48042/ceph-osd      
tcp        0      0 127.0.0.1:631           0.0.0.0:*               LISTEN      9600/cupsd          
tcp        0      0 0.0.0.0:7480            0.0.0.0:*               LISTEN      54454/radosgw       
tcp        0      0 0.0.0.0:6808            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6809            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      10038/master        
tcp        0      0 0.0.0.0:6810            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6811            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6812            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6813            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6814            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6815            0.0.0.0:*               LISTEN      48997/ceph-osd      
tcp        0      0 0.0.0.0:6816            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 0.0.0.0:6817            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 0.0.0.0:6818            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 0.0.0.0:6819            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 0.0.0.0:6820            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 192.168.1.182:3300      0.0.0.0:*               LISTEN      9596/ceph-mon       
tcp        0      0 0.0.0.0:6821            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp        0      0 192.168.1.182:6789      0.0.0.0:*               LISTEN      9596/ceph-mon       
tcp        0      0 0.0.0.0:6822            0.0.0.0:*               LISTEN      49927/ceph-osd      
tcp6       0      0 :::10250                :::*                    LISTEN      10235/kubelet       
tcp6       0      0 :::10255                :::*                    LISTEN      10235/kubelet       
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::6000                 :::*                    LISTEN      9918/X              
tcp6       0      0 :::22                   :::*                    LISTEN      9594/sshd           
tcp6       0      0 ::1:631                 :::*                    LISTEN      9600/cupsd          
tcp6       0      0 :::7480                 :::*                    LISTEN      54454/radosgw       
tcp6       0      0 ::1:25                  :::*                    LISTEN      10038/master        
tcp6       0      0 :::4194                 :::*                    LISTEN      10235/kubelet       
[root@node02 ~]# 
[root@node02 ~]# ceph -s

  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 73m)
    mgr: node01(active, since 25m), standbys: node03, node02
    osd: 9 osds: 9 up (since 28m), 9 in (since 28m)
    rgw: 3 daemons active (node01, node02, node03)
 
  task status:
 
  data:
    pools:   4 pools, 128 pgs
    objects: 187 objects, 1.2 KiB
    usage:   9.1 GiB used, 81 GiB / 90 GiB avail
    pgs:     128 active+clean
 
[root@node02 ~]# 

image-20201112155950023

image-20201112160201217

14.在node03查看相关的服务和端口,如图所示,Ceph分布式节点部署成功

[root@node03 ~]# ps -ef|grep ceph

[root@node03 ~]# netstat -tnlp

[root@node03 ~]# ceph -s

[root@node03 ~]# ps -ef|grep ceph
root        9222       1  0 14:57 ?        00:00:00 /usr/bin/python2.7 /usr/bin/ceph-crash
ceph        9600       1  0 14:57 ?        00:00:08 /usr/bin/ceph-mon -f --cluster ceph --id node03 --setuser ceph --setgroup ceph
ceph       41148       1  0 15:41 ?        00:00:09 /usr/bin/ceph-osd -f --cluster ceph --id 6 --setuser ceph --setgroup ceph
ceph       42129       1  0 15:42 ?        00:00:08 /usr/bin/ceph-osd -f --cluster ceph --id 7 --setuser ceph --setgroup ceph
ceph       42893       1  0 15:42 ?        00:00:07 /usr/bin/ceph-osd -f --cluster ceph --id 8 --setuser ceph --setgroup ceph
ceph       45082       1  0 15:45 ?        00:00:02 /usr/bin/ceph-mgr -f --cluster ceph --id node03 --setuser ceph --setgroup ceph
ceph       46277       1  0 15:47 ?        00:00:03 /usr/bin/radosgw -f --cluster ceph --name client.rgw.node03 --setuser ceph --setgroup ceph
root       62692   11703  0 16:11 pts/0    00:00:00 grep --color=auto ceph
[root@node03 ~]# 
[root@node03 ~]# netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:6823            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      10326/kubelet       
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      9591/kube-proxy     
tcp        0      0 192.168.1.183:2379      0.0.0.0:*               LISTEN      62633/etcd          
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      62633/etcd          
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      62633/etcd          
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:6800            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6000            0.0.0.0:*               LISTEN      9735/X              
tcp        0      0 0.0.0.0:6801            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6802            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6803            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6804            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6805            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:6806            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      9587/sshd           
tcp        0      0 0.0.0.0:6807            0.0.0.0:*               LISTEN      41148/ceph-osd      
tcp        0      0 127.0.0.1:631           0.0.0.0:*               LISTEN      9584/cupsd          
tcp        0      0 0.0.0.0:7480            0.0.0.0:*               LISTEN      46277/radosgw       
tcp        0      0 0.0.0.0:6808            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6809            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      9757/master         
tcp        0      0 0.0.0.0:6810            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6811            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6812            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6813            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6814            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6815            0.0.0.0:*               LISTEN      42129/ceph-osd      
tcp        0      0 0.0.0.0:6816            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 0.0.0.0:6817            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 0.0.0.0:6818            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 0.0.0.0:6819            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 0.0.0.0:6820            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 192.168.1.183:3300      0.0.0.0:*               LISTEN      9600/ceph-mon       
tcp        0      0 0.0.0.0:6821            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp        0      0 192.168.1.183:6789      0.0.0.0:*               LISTEN      9600/ceph-mon       
tcp        0      0 0.0.0.0:6822            0.0.0.0:*               LISTEN      42893/ceph-osd      
tcp6       0      0 :::10250                :::*                    LISTEN      10326/kubelet       
tcp6       0      0 :::10255                :::*                    LISTEN      10326/kubelet       
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::6000                 :::*                    LISTEN      9735/X              
tcp6       0      0 :::22                   :::*                    LISTEN      9587/sshd           
tcp6       0      0 ::1:631                 :::*                    LISTEN      9584/cupsd          
tcp6       0      0 :::7480                 :::*                    LISTEN      46277/radosgw       
tcp6       0      0 ::1:25                  :::*                    LISTEN      9757/master         
tcp6       0      0 :::4194                 :::*                    LISTEN      10326/kubelet       
[root@node03 ~]# 
[root@node03 ~]# ceph -s
  cluster:
    id:     c90f3c76-20f0-4091-a66b-74ec0e6f4ec8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node01,node02,node03 (age 73m)
    mgr: node01(active, since 25m), standbys: node03, node02
    osd: 9 osds: 9 up (since 28m), 9 in (since 28m)
    rgw: 3 daemons active (node01, node02, node03)
 
  task status:
 
  data:
    pools:   4 pools, 128 pgs
    objects: 187 objects, 1.2 KiB
    usage:   9.1 GiB used, 81 GiB / 90 GiB avail
    pgs:     128 active+clean
 
[root@node03 ~]# 

image-20201112160026336

image-20201112160231639