1. Environment preparation

 

10.0.1.100    cephdeploy
10.0.1.110    cephmon1
10.0.1.120    cephmon2
10.0.1.130    cephosd1
10.0.1.140    cephosd2
10.0.1.150    cephosd3
10.0.1.11     controller
10.0.1.31     compute1
10.0.1.41     block



Installation of Ceph and OpenStack themselves is omitted here.
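These name mappings are assumed to live in /etc/hosts on every node (the usual convention in setups like this); a minimal sketch for appending them:

cat >> /etc/hosts <<'EOF'
10.0.1.100 cephdeploy
10.0.1.110 cephmon1
10.0.1.120 cephmon2
10.0.1.130 cephosd1
10.0.1.140 cephosd2
10.0.1.150 cephosd3
10.0.1.11  controller
10.0.1.31  compute1
10.0.1.41  block
EOF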


2. Create three pools

ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create vms 128
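128 placement groups per pool is a reasonable choice for a small cluster of this size; in general the PG count should be sized to the number of OSDs. Pool creation and cluster health can be checked with:

ceph osd lspools
ceph -s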

3. Copy ceph.conf to each OpenStack node


ssh {your-openstack-server} sudo tee /etc/ceph/ceph.conf </etc/ceph/ceph.conf
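This can be repeated for every OpenStack node, for example with a small loop over the hostnames defined above (a sketch):

for host in controller compute1 block; do
    ssh $host sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
done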


4. Install Ceph packages on the OpenStack nodes

On the glance-api node:

    apt-get install python-rbd

On the nova-compute and cinder-volume nodes:

    apt-get install ceph-common

5. Set up authentication

ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
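The capabilities that were actually stored can be double-checked with:

ceph auth get client.cinder
ceph auth get client.glance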

6. Copy the keys to each node

ceph auth get-or-create client.glance | ssh 10.0.1.11 sudo tee /etc/ceph/ceph.client.glance.keyring
ssh 10.0.1.11 sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh 10.0.1.41 sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh 10.0.1.41 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder | ssh 10.0.1.31 sudo tee /etc/ceph/ceph.client.cinder.keyring
ceph auth get-key client.cinder | ssh 10.0.1.31 tee client.cinder.key

7. Compute node configuration

First generate a UUID for the libvirt secret:

#uuidgen

17abcd6f-109d-4e95-8115-4c31a6caa084


#cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
        <uuid>17abcd6f-109d-4e95-8115-4c31a6caa084</uuid>
        <usage type='ceph'>
                <name>client.cinder secret</name>
        </usage>
</secret>
EOF

#sudo virsh secret-define --file secret.xml

#sudo virsh secret-set-value --secret 17abcd6f-109d-4e95-8115-4c31a6caa084 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
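To confirm libvirt stored the secret correctly (the returned value should match the cinder key):

sudo virsh secret-list
sudo virsh secret-get-value 17abcd6f-109d-4e95-8115-4c31a6caa084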

Append the following to the end of /etc/ceph/ceph.conf on the compute node:

[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20

Edit /etc/nova/nova.conf and add the following to the [DEFAULT] section; the uuid must stay in sync with the one generated above:

libvirt_images_type = rbd
libvirt_images_rbd_pool = vms
libvirt_images_rbd_ceph_conf = /etc/ceph/ceph.conf
libvirt_disk_cachemodes="network=writeback"
rbd_user = cinder
rbd_secret_uuid = 17abcd6f-109d-4e95-8115-4c31a6caa084
libvirt_inject_password = false
libvirt_inject_key = false
libvirt_inject_partition = -2
libvirt_live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
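Note that on Kilo these [DEFAULT] option names are deprecated aliases; the same settings can instead be expressed in the [libvirt] section of nova.conf. A sketch of the equivalent form (option names as they appear in Kilo):

[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes = "network=writeback"
rbd_user = cinder
rbd_secret_uuid = 17abcd6f-109d-4e95-8115-4c31a6caa084
inject_password = false
inject_key = false
inject_partition = -2
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"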

Create the socket and log directories and set their ownership:

mkdir -p /var/run/ceph/guests/ /var/log/qemu/
chown libvirt-qemu:libvirtd /var/run/ceph/guests /var/log/qemu/

Restart the nova-compute service:
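On Ubuntu this is typically:

service nova-compute restart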


8. Glance node configuration

Edit /etc/glance/glance-api.conf:

[DEFAULT]

show_image_direct_url = True


[glance_store]

default_store = rbd

stores = rbd

rbd_store_pool = images

rbd_store_user = glance

rbd_store_ceph_conf = /etc/ceph/ceph.conf

rbd_store_chunk_size = 8


Restart the glance-api service:
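On Ubuntu this is typically:

service glance-api restart

Afterwards, an image upload can confirm Glance is writing into RBD (a sketch; assumes a local cirros image converted to raw, the recommended format for RBD-backed images):

glance image-create --name cirros --disk-format raw --container-format bare --file cirros.raw
rbd -p images ls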


9. Cinder configuration: edit /etc/cinder/cinder.conf (the uuid must match the one used above)


[DEFAULT]

enabled_backends = ceph

[ceph]

volume_driver = cinder.volume.drivers.rbd.RBDDriver

rbd_pool = volumes

rbd_ceph_conf = /etc/ceph/ceph.conf

rbd_flatten_volume_from_snapshot = false

rbd_max_clone_depth = 5

rbd_store_chunk_size = 4

rados_connect_timeout = -1

glance_api_version = 2

rbd_user = cinder

rbd_secret_uuid = 17abcd6f-109d-4e95-8115-4c31a6caa084

Restart the cinder-volume service:
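On Ubuntu this is typically:

service cinder-volume restart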

Testing the result

[screenshot]

I create a 50 GB volume and attach it to instance c2.
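The same thing from the CLI would look roughly like this (a sketch; "c2" is the instance name used here, and <volume-id> comes from the create output):

cinder create --display-name vol50 50
nova volume-attach c2 <volume-id>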

On that instance we can see the new disk:

[screenshot]

Checking usage on cephdeploy, the volumes pool is now in use:
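The usage the screenshot shows can also be pulled from the command line:

ceph df
rados df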

[screenshot]

Now write some data into it: I write a 2 GB file.
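Inside the guest this might be, for example (assuming the attached disk has been formatted and mounted at /mnt):

dd if=/dev/zero of=/mnt/testfile bs=1M count=2048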

[screenshot]

Checking cephdeploy again, the volumes pool usage now looks like this:

[screenshot]