Deploying the Ceph cluster

#!/bin/bash
#AUTHOR:AN
#VERSION:1.1.0
#DATE:2019-06-08
#MODIFY:
#FUNCTION:Deploy a Ceph cluster
#DESCRIBE:
#NOTICES:Each server needs at least 4 disks

#Load the configuration file
source /cloud_nsd/conf/ceph.conf
#Load the function library
if [ -f "$Script_Path/myfunction.lib" ];then
source $Script_Path/myfunction.lib
else
echo -e "\033[31mFunction library not found\033[0m"
exit $NOEXIST
fi

IP=`ifconfig |awk '/inet /&&!/127.0.0.1/{print $2}' |sed -n '1p'` #grab the first non-loopback IPv4 address of this host
Network=${IP%.*} #extract the network prefix (first three octets)
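#The variable file sourced above is not included in this document. A minimal
#sketch of /cloud_nsd/conf/ceph.conf follows; only the variable names come from
#the scripts, every value below is an assumption used for illustration:
#Script_Path=/cloud_nsd/shell              #helper scripts and function library
#Soft_Path=/cloud_nsd/soft                 #offline software packages
#Cluster_Dir=/root/ceph-cluster            #working directory for ceph-deploy
#Yum_Conf=/etc/yum.repos.d/ceph.repo       #repo file written by CONF_YUM
#Udev_Rule=/etc/udev/rules.d/70-vdb.rules  #udev rule for the journal partitions
#Ntp_Server=192.168.4.254                  #NTP server used by chrony
#Client_Host=192.168.4.10 ; Client_Name=client
#Node1_Host=192.168.4.11  ; Node1_Name=node1
#Node2_Host=192.168.4.12  ; Node2_Name=node2
#Node3_Host=192.168.4.13  ; Node3_Name=node3
#Mds_Host=192.168.4.14    ; Mds_Name=node4
#Buffer_Disk=/dev/vdb ; Data_Disk1=/dev/vdc ; Data_Disk2=/dev/vdd
#Image1_Name=image ; Image1_Size=10G ; Connect_Image=image
#NOEXIST=65 ; ISERROR=66                   #exit status codes used by the scripts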

#############################################################
#Configure the YUM repositories; make sure the repository directories are already shared via the FTP server
CONF_YUM(){
[ ! -f $Yum_Conf ] && touch $Yum_Conf
> $Yum_Conf
cat >> $Yum_Conf << EOF
[mon]
name=mon
baseurl=ftp://$Network.254/ceph/MON
enabled=1
gpgcheck=0
[osd]
name=osd
baseurl=ftp://$Network.254/ceph/OSD
enabled=1
gpgcheck=0
[tools]
name=tools
baseurl=ftp://$Network.254/ceph/Tools
enabled=1
gpgcheck=0
EOF
yum clean all &>/dev/null && yum repolist
}

#Configure YUM, hostname resolution, and time sync (run on the client and on every MON/OSD node)
INIT(){
CONF_YUM #create the YUM repositories
#configure hostname resolution
cat >> /etc/hosts << EOF
$Client_Host $Client_Name
$Node1_Host $Node1_Name
$Node2_Host $Node2_Name
$Node3_Host $Node3_Name
EOF
#configure time synchronization
echo -en "Configuring NTP sync......\t\t"
sed -i "/^server/c server $Ntp_Server iburst" /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd &> /dev/null
sleep 5
chronyc sources -v |grep '\^\*' &> /dev/null
[ $? -ne 0 ] && cecho 31 "NTP sync failed" && exit $ISERROR
echo -e "\e[32;1m[OK]\e[0m"
}

#Create the MON nodes
MON(){
#Configure passwordless SSH login; edit the IP addresses in /cloud_nsd/shell/ip.txt beforehand
bash $Script_Path/Pssh.sh -p
YUM ceph-deploy
MKDIR $Cluster_Dir
cd $Cluster_Dir
#Create the Ceph cluster configuration; this generates the Ceph config files in the ceph-cluster directory
ceph-deploy new $Node1_Name $Node2_Name $Node3_Name
#Install the Ceph packages on all nodes
for i in $Node1_Name $Node2_Name $Node3_Name
do
ssh $i "yum -y install ceph-mon ceph-osd ceph-mds ceph-radosgw"
done
ceph-deploy mon create-initial && OK MON #initialize the mon service on all nodes, i.e. start the mon daemons
}
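#A hedged verification step (not part of the original flow): once create-initial
#has finished, the three monitors should have formed a quorum, which can be
#checked from the admin node with standard Ceph commands, e.g.:
#ceph mon stat    #expect all three mons listed and in quorum
#ceph -s          #cluster status; health usually stays HEALTH_WARN until OSDs are added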

##############################################################
#Create the journal (cache) disks
BUFFER_DISK(){
rpm -q pssh &>/dev/null || rpm -ivh $Soft_Path/lnmp_soft/pssh-2.3.1-5.el7.noarch.rpm
[ ! -f $Udev_Rule ] && touch $Udev_Rule
> $Udev_Rule
cat >> $Udev_Rule << EOF
ENV{DEVNAME}=="${Buffer_Disk}1",OWNER="ceph",GROUP="ceph"
ENV{DEVNAME}=="${Buffer_Disk}2",OWNER="ceph",GROUP="ceph"
EOF
#partitions vdb1 and vdb2 are used as journal cache disks on the storage servers
for i in $Node1_Name $Node2_Name $Node3_Name
do
ssh $i "parted $Buffer_Disk mklabel gpt"
ssh $i "parted $Buffer_Disk mkpart primary 1 50%"
ssh $i "parted $Buffer_Disk mkpart primary 50% 100%"
ssh $i "chown ceph:ceph ${Buffer_Disk}1"
ssh $i "chown ceph:ceph ${Buffer_Disk}2"
done
pscp.pssh -vH $Node2_Name $Udev_Rule $Udev_Rule
pscp.pssh -vH $Node3_Name $Udev_Rule $Udev_Rule
}
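#A hedged note, assuming $Udev_Rule points to a file under /etc/udev/rules.d/:
#the OWNER/GROUP set by the rule is only applied when udev processes the devices,
#so after copying the rule it can be reloaded on each node without a reboot, e.g.:
#for i in $Node1_Name $Node2_Name $Node3_Name; do ssh $i "udevadm control --reload && udevadm trigger"; done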

#Wipe the data disks (use with caution)
INIT_DISK(){
ceph-deploy disk zap $Node1_Name:$Data_Disk1 $Node1_Name:$Data_Disk2
ceph-deploy disk zap $Node2_Name:$Data_Disk1 $Node2_Name:$Data_Disk2
ceph-deploy disk zap $Node3_Name:$Data_Disk1 $Node3_Name:$Data_Disk2
}

#Create the OSD nodes
OSD(){
cd $Cluster_Dir
BUFFER_DISK
INIT_DISK
#Create the OSD storage devices: vdc provides storage space for the cluster and vdb1 provides the JOURNAL cache;
#each storage device is paired with one cache device; the cache should be an SSD and does not need to be large
ceph-deploy osd create $Node1_Name:$Data_Disk1:${Buffer_Disk}1 $Node1_Name:$Data_Disk2:${Buffer_Disk}2
ceph-deploy osd create $Node2_Name:$Data_Disk1:${Buffer_Disk}1 $Node2_Name:$Data_Disk2:${Buffer_Disk}2
ceph-deploy osd create $Node3_Name:$Data_Disk1:${Buffer_Disk}1 $Node3_Name:$Data_Disk2:${Buffer_Disk}2
ceph -s |grep "health HEALTH_OK" &>/dev/null && OK OSD && exit 0
ERROR OSD && exit $ISERROR
}

#Help information
HELP(){
cat << EOF
Ceph_Deploy version 1.1.0
Usage: Ceph_Deploy [-h] [-init] [-mon] [-osd]
=======================================================================
optional arguments:
-h     show this help message
-init  initial setup [run on the client and on every MON/OSD node]
-mon   create the MON nodes
-osd   create the OSD nodes
EXAMPLE:
bash Ceph_Deploy.sh -init
EOF
}
#############################Main Program#############################
[ $# -eq 0 ] && HELP && exit 0
case $1 in
-h)
HELP;;
-init)
INIT;;
-mon)
MON;;
-osd)
OSD;;
*)
cecho 31 "Invalid option:bash `basename $0` [-h]"
esac
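The three options are meant to be run in order. A typical run, assuming the script is started from $Script_Path on each machine and that node1 doubles as the ceph-deploy admin node, looks roughly like this:

bash Ceph_Deploy.sh -init    #on the client and on every MON/OSD node
bash Ceph_Deploy.sh -mon     #on node1 only, after -init has completed everywhere
bash Ceph_Deploy.sh -osd     #on node1 only, after -mon reports OK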

Providing Ceph shared services

#!/bin/bash
#AUTHOR:AN
#VERSION:1.1.0
#DATE:2019-06-08
#MODIFY:
#FUNCTION:Block and file sharing on the Ceph cluster

#Load the configuration file
source /cloud_nsd/conf/ceph.conf
#Load the function library
if [ -f "$Script_Path/myfunction.lib" ];then
source $Script_Path/myfunction.lib
else
echo -e "\033[31mFunction library not found\033[0m"
exit $NOEXIST
fi

#Commonly used commands (for reference)
FUNCTION(){
rbd resize --size 7G image --allow-shrink #shrink the image
rbd resize --size 15G image #grow the image
rbd showmapped #show mapped image information
rbd snap create image --snap image-snap1 #create a snapshot of the image named image-snap1
rbd snap rollback image --snap image-snap1 #roll back to the snapshot

rbd snap protect image --snap image-snap1 #protect the snapshot
rbd clone image --snap image-snap1 image-clone --image-feature layering #create a cloned image
rbd info image-clone #show the relationship between the clone and its parent snapshot
rbd flatten image-clone #after syncing the parent snapshot's data, the clone can work independently
}

#Block device sharing
BLOCK_SHARE(){
#Create the image
rbd create $Image1_Name --image-feature layering --size $Image1_Size #layering enables copy-on-write (COW)
rbd list |grep "$Image1_Name" &>/dev/null && OK $Image1_Name && exit 0
ERROR $Image1_Name && exit $ISERROR
}

#File system sharing
FILE_SHARE(){
echo "$Mds_Host $Mds_Name" >> /etc/hosts
scp $Yum_Conf $Mds_Host:$Yum_Conf
ssh $Mds_Host "yum repolist"
scp /etc/chrony.conf $Mds_Host:/etc/chrony.conf
ssh $Mds_Host "systemctl restart chronyd"
ssh $Mds_Host "yum -y install ceph-mds"
cd $Cluster_Dir
ceph-deploy mds create $Mds_Name #copy the configuration to node4 (the MDS node) and start the mds service
ceph-deploy admin $Mds_Name #sync the configuration file and keys

ssh $Mds_Host "ceph osd pool create cephfs_data 128" #创建存储池,对应128个PG
ssh $Mds_Host "ceph osd pool create cephfs_metadata 128" #创建存储池,对应128个PG
#创建Ceph文件系统
ssh $Mds_Host "ceph fs new myfs1 cephfs_metadata cephfs_data"
ceph mds stat |grep 'active' &>/dev/null && OK MDS && exit 0
ERROR MDS && exit $ISERROR
#Client-side mount (example)
#mount -t ceph 192.168.4.11:6789:/ /mnt -o name=admin,secret=AQBTsdRapUxBKRAANXtteNUyoEmQHveb75bISg==
}
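#A minimal client-side sketch for the mount commented out above, assuming the
#admin key from ceph.client.admin.keyring has been saved to /etc/ceph/admin.secret
#on the client (paths and mount point are placeholders):
#mkdir -p /mnt/cephfs
#mount -t ceph $Node1_Host:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret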

#Client side: use the Ceph block share
CLIENT(){
YUM ceph-common
bash $Script_Path/Pssh.sh -p
scp $Node1_Host:/etc/ceph/ceph.conf /etc/ceph/ #copy the cluster configuration (otherwise the client cannot locate the cluster)
scp $Node1_Host:/etc/ceph/ceph.client.admin.keyring /etc/ceph/ #copy the connection keyring (otherwise the client has no access)
rbd map $Connect_Image #map the image
rbd showmapped |grep "$Connect_Image" &>/dev/null
if [ $? -eq 0 ];then
cecho 36 "${Connect_Image} mapped successfully"
else
cecho 31 "Failed to map ${Connect_Image}" && exit $ISERROR
fi
}
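#A hedged follow-up (not part of the original flow): after rbd map, the image
#appears as a local block device (typically /dev/rbd0, as reported by
#"rbd showmapped") and still needs a filesystem before it can be used, e.g.:
#mkfs.xfs /dev/rbd0
#mount /dev/rbd0 /mnt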

#Help information
HELP(){
cat << EOF
Ceph_Service version 1.1.0
Usage: Ceph_Service [-h] [-block] [-client] [-file]
=======================================================================
optional arguments:
-h       show this help message
-block   create the block share
-client  use the Ceph block share on the client
-file    create the file system share
EXAMPLE:
bash Ceph_Service.sh -block
EOF
}
#############################Main Program#############################
[ $# -eq 0 ] && HELP && exit 0
case $1 in
-h)
HELP;;
-block)
BLOCK_SHARE;;
-client)
CLIENT;;
-file)
FILE_SHARE;;
*)
cecho 31 "Invalid option, see: bash `basename $0` -h"
esac
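As with the deployment script, the options target different machines. Assuming node1 is the ceph-deploy admin node, a typical order is:

bash Ceph_Service.sh -block     #on node1: create the RBD image
bash Ceph_Service.sh -client    #on the client: map the image
bash Ceph_Service.sh -file      #on node1: add the MDS node and create the CephFS file system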