Introduction

Linux continues to push into the scalable computing space, and in particular scalable storage. Ceph is a recent addition to the impressive lineup of Linux file systems: a distributed file system that adds replication and fault tolerance while maintaining POSIX compatibility. The Ceph ecosystem can be divided into four parts:
1. Clients: the data users
2. cmds: the metadata server cluster, which caches and synchronizes the distributed metadata
3. cosd: the object storage cluster, which stores data and metadata as objects and performs other key functions
4. cmon: the cluster monitors, which perform the monitoring functions

Preparation

Prepare three CentOS 7 virtual machines: configure their IP addresses and hostnames, synchronize the system time, disable the firewall and SELinux, add the IP-to-hostname mappings, and attach an extra disk to each VM (a minimal sketch of these steps follows the address table below).

ip hostname
192.168.29.145 controller
192.168.29.146 computer
192.168.29.147 storager
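
The preparation commands themselves are not shown in this post; the following is a minimal sketch of those steps on the controller node (repeat on computer and storager with their own hostnames; the use of chrony for time sync is an assumption):

[root@controller ~]# hostnamectl set-hostname controller
[root@controller ~]# systemctl stop firewalld && systemctl disable firewalld
[root@controller ~]# setenforce 0
[root@controller ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@controller ~]# cat >> /etc/hosts << EOF
192.168.29.145 controller
192.168.29.146 computer
192.168.29.147 storager
EOF
[root@controller ~]# yum install chrony -y && systemctl enable chronyd && systemctl start chronyd
# confirm the extra disk is visible (typically as sdb)
[root@controller ~]# lsblk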

For the OpenStack deployment itself, refer to: https://blog.51cto.com/14832653/2516210. Note: if an OpenStack cluster has already been created, delete its instances, images, and volumes first; a minimal cleanup sketch follows the repository installation below.

Install the EPEL and Ceph repositories

[root@controller ~]# yum install epel-release centos-release-ceph-luminous -y
[root@computer ~]# yum install epel-release centos-release-ceph-luminous -y
[root@storager ~]# yum install epel-release centos-release-ceph-luminous -y
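
As noted above, if an OpenStack cluster already exists, its instances, volumes, and images should be removed before switching the backends to Ceph. A minimal cleanup sketch, assuming the admin credentials file is admin-openrc and the IDs are taken from the corresponding list commands:

[root@controller ~]# source admin-openrc
[root@controller ~]# openstack server list
[root@controller ~]# openstack server delete <instance-id>
[root@controller ~]# openstack volume list
[root@controller ~]# openstack volume delete <volume-id>
[root@controller ~]# openstack image list
[root@controller ~]# openstack image delete <image-id>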

Configure the Ceph repository

[root@controller ~]# vi /etc/yum.repos.d/ceph.repo 
[root@computer ~]# vi /etc/yum.repos.d/ceph.repo 
[root@storager ~]# vi /etc/yum.repos.d/ceph.repo 
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/x86_64
enabled=1
gpgcheck=0
type=rpm-md
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
priority=1
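
After writing the repo file on all three nodes, rebuilding the yum cache makes sure the new Ceph repository is actually picked up:

[root@controller ~]# yum clean all && yum makecache
[root@controller ~]# yum repolist | grep -i ceph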

Install the Ceph packages

[root@controller ~]# yum install ceph ceph-deploy -y
[root@computer ~]# yum install ceph ceph-deploy -y
[root@storager ~]# yum install ceph ceph-deploy -y

Install libvirt on the computer node

[root@computer ~]# yum install libvirt -y

Deploy the Ceph cluster

Create the cluster

[root@controller ~]# cd /etc/ceph/
[root@controller ceph]# ceph-deploy new controller computer storager
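
ceph-deploy drives the other nodes over SSH; if passwordless SSH from controller to computer and storager is not already configured, the deploy commands will prompt for each node's password. A minimal sketch to set it up beforehand:

[root@controller ~]# ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
[root@controller ~]# ssh-copy-id root@computer
[root@controller ~]# ssh-copy-id root@storager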

Edit the configuration file

[root@controller ceph]# vi ceph.conf
[global]
fsid = e5288fdd-d279-414b-8391-e440d82cc925
mon_initial_members = controller, computer, storager
mon_host = 192.168.29.145,192.168.29.146,192.168.29.147
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# keep three replicas of each object
osd_pool_default_size = 3
# set the cluster public network
public_network = 192.168.29.0/24

Initialize the cluster monitors

[root@controller ceph]# ceph-deploy mon create-initial  

Create the OSDs

[root@controller ceph]# ceph-deploy disk zap controller:sdb computer:sdb storager:sdb
[root@controller ceph]# ceph-deploy osd create  controller:sdb computer:sdb storager:sdb
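
To confirm that all three OSDs came up and joined the CRUSH map, the OSD tree can be checked:

[root@controller ceph]# ceph osd tree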

Distribute the admin keyring

[root@controller ceph]# ceph-deploy admin controller computer storager

Add execute permission to the admin keyring

[root@controller ceph]# chmod +x /etc/ceph/ceph.client.admin.keyring 
[root@computer ceph]# chmod +x /etc/ceph/ceph.client.admin.keyring 
[root@storager ceph]# chmod +x /etc/ceph/ceph.client.admin.keyring 

Create the mgr daemons

[root@controller ceph]# ceph-deploy mgr create controller computer storager

Check the cluster status

[root@controller ceph]# ceph -s

Check the cluster capacity

[root@controller ceph]# ceph df

Create the pools

[root@controller ceph]# ceph osd pool create volumes 64
[root@controller ceph]# ceph osd pool create vms 64
[root@controller ceph]# ceph osd pool create images 64
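
The pg_num of 64 roughly follows the usual rule of thumb of about (OSDs × 100) / replica count placement groups in total, rounded to a power of two and split across the pools; with 3 OSDs and 3 replicas that is about 100 PGs overall, so 64 per pool is generous but acceptable for a small test cluster. The value can be read back per pool:

[root@controller ceph]# ceph osd pool get volumes pg_num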

# tag each pool with the rbd application (these pools will hold RBD images)
[root@controller ceph]# ceph osd pool application enable vms rbd
[root@controller ceph]# ceph osd pool application enable images rbd
[root@controller ceph]# ceph osd pool application enable volumes rbd

Check the mon, osd, and pool status

[root@controller ceph]# ceph mon stat
[root@controller ceph]#  ceph osd status
[root@controller ceph]# ceph osd lspools

Check the contents of each pool

[root@controller ~]# rbd ls vms
[root@controller ~]# rbd ls volumes
[root@controller ~]# rbd ls images

Integrate the Ceph cluster with OpenStack

Create the cinder and glance users and set their permissions

[root@controller ceph]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images'
[root@controller ceph]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=images'
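
The capabilities just granted can be reviewed with:

[root@controller ceph]# ceph auth get client.cinder
[root@controller ceph]# ceph auth get client.glance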

Generate the keyring files

[root@controller ceph]# ceph auth get-or-create client.glance |tee /etc/ceph/ceph.client.glance.keyring 
[root@controller ceph]# ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring

# copy the cinder key to the computer node
[root@controller ~]# ceph auth get-key client.cinder > client.cinder.key
[root@controller ~]# scp client.cinder.key computer:/root/

# fix ownership of the keyrings
[root@controller ceph]# chown  glance.glance /etc/ceph/ceph.client.glance.keyring 
[root@controller ceph]# chown  cinder.cinder /etc/ceph/ceph.client.cinder.keyring

Configure the libvirt secret

# generate a UUID on the computer node
[root@computer ~]# uuidgen
1fad1f90-63fb-4c15-bfc3-366c6559c1fe 

# create the secret definition file
[root@computer ~]# vi secret.xml
<secret ephemeral='no' private='no'>
  <uuid>1fad1f90-63fb-4c15-bfc3-366c6559c1fe</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>

# define the secret in libvirt
virsh secret-define --file secret.xml

# set the secret value, then remove the temporary files
virsh secret-set-value --secret 1fad1f90-63fb-4c15-bfc3-366c6559c1fe  --base64 $(cat client.cinder.key) && rm -rf client.cinder.key secret.xml
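
To confirm that libvirt stored the secret (the UUID is the one generated above):

[root@computer ~]# virsh secret-list
[root@computer ~]# virsh secret-get-value 1fad1f90-63fb-4c15-bfc3-366c6559c1fe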

Integrate the Glance module

Edit the configuration file

[root@controller ~]# vi /etc/glance/glance-api.conf 
[glance_store]
# the default store settings need to be commented out
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf

Restart the service

[root@controller ~]# systemctl restart openstack-glance-api
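
Once glance-api is back up, uploading a test image and listing the images pool is a quick way to confirm that images now land in Ceph. A sketch, assuming qemu-img and a local cirros-0.4.0-x86_64-disk.img file are available and admin credentials are sourced (raw format is used because RBD-backed glance clones raw images most efficiently):

[root@controller ~]# source admin-openrc
[root@controller ~]# qemu-img convert -f qcow2 -O raw cirros-0.4.0-x86_64-disk.img cirros.raw
[root@controller ~]# openstack image create "cirros-ceph" --file cirros.raw --disk-format raw --container-format bare
[root@controller ~]# rbd ls images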

Integrate the Cinder module

Edit the configuration file

[root@controller ~]# vi /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.29.145
enabled_backends = ceph

[ceph]
default_volume_type= ceph
glance_api_version = 2  
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
# must match the UUID generated on the computer node
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe 

Sync the database

# if the cinder database already exists, drop it, recreate it, then sync
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder

Restart the services

[root@controller ~]# systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service

Create the ceph volume type and bind it to the backend

[root@controller ~]# source admin-openrc 
[root@controller ~]# cinder type-create  ceph 
[root@controller ~]# cinder type-key ceph set volume_backend_name=ceph
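
With the ceph volume type in place, creating a small test volume and listing the volumes pool confirms the RBD backend works end to end:

[root@controller ~]# openstack volume create --type ceph --size 1 test-ceph-vol
[root@controller ~]# openstack volume list
[root@controller ~]# rbd ls volumes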

Integrate the nova-compute module

Edit the configuration file on the computer node

[root@computer ~]# vi /etc/nova/nova.conf
[libvirt]
virt_type = qemu
inject_password = true
inject_partition = -1
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 1fad1f90-63fb-4c15-bfc3-366c6559c1fe
disk_cachemodes = "network=writeback"
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard = unmap
[root@computer ~]# vi /etc/ceph/ceph.conf 
[client]
rbd cache=true
rbd cache writethrough until flush=true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20

Create the log directories

[root@computer ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@computer ~]# chmod 777 -R /var/run/ceph/guests/ /var/log/qemu/

Distribute the keyring from the controller

[root@controller ~]# cd /etc/ceph
[root@controller ceph]# scp ceph.client.cinder.keyring root@computer:/etc/ceph
[root@controller ceph]# scp ceph.client.cinder.keyring root@storager:/etc/ceph

Restart the services

[root@computer ~]# systemctl stop libvirtd openstack-nova-compute
[root@computer ~]# systemctl start libvirtd openstack-nova-compute
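
After the restart, it can be checked from the controller that the compute service is up; once a test instance is booted, its disk should appear in the vms pool. A sketch (the flavor, image, and network names are assumptions to adapt):

[root@controller ~]# source admin-openrc
[root@controller ~]# openstack compute service list
[root@controller ~]# openstack server create --flavor m1.tiny --image cirros-ceph --network <network-name> test-ceph-vm
[root@controller ~]# rbd ls vms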