## Environment Preparation
| OS | Version | Hostname | CPU | MEM | Disk 1 | Disk 2 | ens33 | ens34 | ens35 |
| -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
| CentOS | 7 | deploy | 4 core | 4GB | 60G | none | 89.6 | none | none |
| CentOS | 7 | controller01 | 8 core | 12GB | 60G | 100G | 89.10 | 90.10 | none |
| CentOS | 7 | controller02 | 8 core | 12GB | 60G | 100G | 89.11 | 90.11 | none |
| CentOS | 7 | controller03 | 8 core | 12GB | 60G | 100G | 89.12 | 90.12 | none |
| CentOS | 7 | compute01 | 8 core | 16GB | 60G | 100G | 89.13 | 90.13 | none |
| CentOS | 7 | compute02 | 8 core | 16GB | 60G | 100G | 89.14 | 90.14 | none |
| CentOS | 7 | compute03 | 8 core | 16GB | 60G | 100G | 89.15 | 90.15 | none |
| vip | | | | | | | 89.100 | | |

Addresses are abbreviated to their last two octets (e.g. 89.10 means 192.168.89.10). ens33 is the deployment network, ens34 the storage network, and ens35 the external network: neutron's br-ex binds to it, and OpenStack instances reach the outside world through this NIC.
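Before going further, it can be worth double-checking that each node's NICs match the table; a minimal check (run on every node; the ens33/ens34/ens35 names are the ones assumed in this layout):
#Print the state and address of each NIC used in this deployment
for nic in ens33 ens34 ens35; do
    ip -brief addr show "$nic" 2>/dev/null || echo "$nic: not present"
done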
## Component Overview
- Keystone (identity)
- Nova (compute)
- Neutron (networking)
- Glance (image storage)
- Cinder (block storage)
- Swift (object storage)
- Horizon (web UI)
- Ceilometer (metering)
- Heat (orchestration)
- Trove (database)
[OpenStack official documentation (Stein)](https://docs.openstack.org/stein/index.html)
## Base Environment Setup
On all nodes (controller01/02/03 and compute01/02/03), disable selinux, stop firewalld, and install ntpdate (Ceph is extremely sensitive to clock drift).
#Disable selinux
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
#Stop the firewall
systemctl stop firewalld && systemctl disable firewalld
#Install ntpdate and sync time against Aliyun
yum install ntpdate -y && ntpdate ntp1.aliyun.com
#Install other dependencies
yum install epel-release -y
yum install git python-devel libffi-devel gcc openssl-devel python-pip yum-utils device-mapper-persistent-data lvm2 -y
#Stop the host libvirtd service (kolla runs libvirt inside a container)
systemctl stop libvirtd.service && systemctl disable libvirtd.service && systemctl status libvirtd.service
#Configure host name resolution
cat > /etc/hosts<< EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.89.10 controller01
192.168.89.11 controller02
192.168.89.12 controller03
192.168.89.13 compute01
192.168.89.14 compute02
192.168.89.15 compute03
EOF
#Switch pip to a domestic mirror
mkdir -p ~/.pip
tee ~/.pip/pip.conf << 'EOF'
[global]
index-url = http://mirrors.aliyun.com/pypi/simple/
[install]
trusted-host=mirrors.aliyun.com
EOF
#If kolla is being installed inside a virtual machine and you want instances to be able to boot inside it, set virt_type=qemu (the default is kvm). If VMware's "Virtualize Intel VT-x/EPT" option is enabled for the VM, kvm can be kept.
mkdir -p /etc/kolla/config/nova
cat > /etc/kolla/config/nova/nova-compute.conf << EOF
[libvirt]
virt_type=qemu
cpu_mode = none
EOF
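Whether kvm is usable can be checked from inside the VM: if the CPU does not expose the vmx/svm virtualization flags, virt_type=qemu is required (a minimal check, run on the compute nodes):
#Prints the number of CPUs exposing hardware virtualization; 0 means qemu is needed
egrep -c '(vmx|svm)' /proc/cpuinfo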
#Use an Aliyun registry mirror to speed up image pulls (/etc/docker does not exist until docker is installed, so create it first)
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://e9yneuy4.mirror.aliyuncs.com"]
}
EOF
#Configure shared mount propagation for docker (this step can be skipped if ceph is used as the storage backend)
mkdir -p /etc/systemd/system/docker.service.d
tee /etc/systemd/system/docker.service.d/kolla.conf << 'EOF'
[Service]
MountFlags=shared
EOF
**Install docker-ce 18.06**
#Download the yum repo file
curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum clean all
yum makecache
#Search for the available versions
[root@localhost ~]# yum search docker --showduplicates| grep docker-ce| grep 18.06
docker-ce-18.06.0.ce-3.el7.x86_64 : The open-source application container engine
docker-ce-18.06.1.ce-3.el7.x86_64 : The open-source application container engine
docker-ce-18.06.2.ce-3.el7.x86_64 : The open-source application container engine
docker-ce-18.06.3.ce-3.el7.x86_64 : The open-source application container engine
#Install (pick one of the 18.06 builds found above)
yum install docker-ce-18.06.1.ce-3.el7 -y
#Reload and restart the relevant services
systemctl daemon-reload && systemctl enable docker && systemctl restart docker && systemctl status docker
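Optionally verify that docker picked up the registry mirror and that the shared mount propagation from the drop-in took effect (a quick sanity check):
#Should show https://e9yneuy4.mirror.aliyuncs.com under "Registry Mirrors"
docker info | grep -A1 -i "registry mirrors"
#Should print MountFlags=shared
systemctl show docker -p MountFlags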
## Deploy Node Setup
#Install pip and some dependencies
yum install epel-release -y
yum install python-devel libffi-devel gcc openssl-devel git sshpass python-pip -y
cat > deploy_stein_requirements.txt << EOF
ansible==2.5.0
appdirs==1.4.4
Babel==2.9.0
backports.functools-lru-cache==1.6.1
#backports.ssl-match-hostname==3.7.0.1
bcrypt==3.1.7
certifi==2020.12.5
cffi==1.14.5
chardet==4.0.0
cliff==2.18.0
cmd2==0.8.9
contextlib2==0.6.0.post1
cryptography==2.9
debtcollector==1.22.0
decorator==4.4.2
docker==4.4.1
dogpile.cache==0.9.2
enum34==1.1.10
funcsigs==1.0.2
futures==3.3.0
gitdb2==2.0.6
GitPython==2.1.15
idna==2.10
importlib-resources==3.3.1
iso8601==0.1.14
Jinja2==2.11.3
jmespath==0.10.0
jsonpatch==1.28
jsonpointer==2.0
keystoneauth1==4.0.1
kolla-ansible==8.3.0
MarkupSafe==1.1.1
monotonic==1.5
msgpack==1.0.2
munch==2.5.0
netaddr==0.8.0
netifaces==0.10.9
openstacksdk==0.45.0
os-service-types==1.7.0
osc-lib==2.0.0
oslo.config==7.0.0
oslo.i18n==3.25.1
oslo.serialization==2.29.3
oslo.utils==3.42.1
packaging==20.9
paramiko==2.7.2
pathlib2==2.3.5
pbr==5.5.1
prettytable==0.7.2
pycparser==2.20
PyNaCl==1.4.0
pyparsing==2.4.7
pyperclip==1.8.1
python-cinderclient==6.0.0
python-keystoneclient==3.22.0
python-novaclient==16.0.0
python-openstackclient==5.2.1
pytz==2021.1
PyYAML==5.4.1
requests==2.25.1
requestsexceptions==1.4.0
rfc3986==1.4.0
scandir==1.10.0
simplejson==3.17.2
singledispatch==3.4.0.3
six==1.15.0
smmap==3.0.5
smmap2==3.0.1
stevedore==1.32.0
subprocess32==3.5.4
typing==3.7.4.3
unicodecsv==0.14.1
urllib3==1.26.3
wcwidth==0.2.5
websocket-client==0.57.0
wrapt==1.12.1
zipp==1.2.0
EOF
#Install the pinned dependencies, then check the ansible and kolla-ansible versions
pip install -r deploy_stein_requirements.txt
# pip list | grep ansible
ansible 2.5.0
kolla-ansible 8.3.0
#Adjust the ansible configuration (pip does not create /etc/ansible, so create it first)
mkdir -p /etc/ansible
cat > /etc/ansible/ansible.cfg << EOF
[defaults]
host_key_checking=False
pipelining=True
forks=100
EOF
#Enable passwordless SSH login
ssh-keygen    #press Enter at every prompt
#Configure local name resolution
cat > /etc/hosts<< EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.89.10 controller01
192.168.89.11 controller02
192.168.89.12 controller03
192.168.89.13 compute01
192.168.89.14 compute02
192.168.89.15 compute03
EOF
#Generate the ansible hosts (inventory) file
mkdir -p /etc/ansible
cat > /etc/ansible/hosts << EOF
[master]
controller0[1:3]
[compute]
compute0[1:3]
EOF
#Authorize each node with ansible
ansible all -m authorized_key -a "manage_dir=yes user=root key={{ lookup('file','/root/.ssh/id_rsa.pub') }}" -k
SSH password:    #enter the root password
#Verify that authentication succeeded
[root@deploy ~]# ansible all -m ping -o
compute02 | SUCCESS => {"changed": false, "ping": "pong"}
compute03 | SUCCESS => {"changed": false, "ping": "pong"}
controller03 | SUCCESS => {"changed": false, "ping": "pong"}
controller02 | SUCCESS => {"changed": false, "ping": "pong"}
compute01 | SUCCESS => {"changed": false, "ping": "pong"}
controller01 | SUCCESS => {"changed": false, "ping": "pong"}
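With ansible connectivity in place, the per-node preparation from the base environment section can also be pushed from the deploy node in one go instead of logging in to each host (a sketch, assuming the /etc/ansible/hosts groups above):
#Disable selinux/firewalld and sync time on all nodes at once
ansible all -m shell -a "sed -i 's/=enforcing/=disabled/g' /etc/selinux/config"
ansible all -m shell -a "systemctl stop firewalld && systemctl disable firewalld"
ansible all -m shell -a "yum install ntpdate -y && ntpdate ntp1.aliyun.com"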
## Ceph Deployment
Ceph deployment layout:
| OS | Version | Hostname | CPU | MEM | Disk 1 | ceph-osd | ens33 (management) | ens34 (cluster) |
| -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
| CentOS | 7 | controller01 | 8 core | 8GB | 60G | 100G | 89.10 | 90.10 |
| CentOS | 7 | controller02 | 8 core | 8GB | 60G | 100G | 89.11 | 90.11 |
| CentOS | 7 | controller03 | 8 core | 8GB | 60G | 100G | 89.12 | 90.12 |
| CentOS | 7 | compute01 | 8 core | 16GB | 60G | 100G | 89.13 | 90.13 |
| CentOS | 7 | compute02 | 8 core | 16GB | 60G | 100G | 89.14 | 90.14 |
| CentOS | 7 | compute03 | 8 core | 16GB | 60G | 100G | 89.15 | 90.15 |
### On controller01, generate a key for passwordless login to the other hosts: run ssh-keygen, press Enter at every prompt, then authorize each node the same way as on the deploy node (omitted here)
Ceph is deployed with ceph-deploy, version 2.0.1
#Install the deployment tool on controller01
yum install ceph-deploy -y
#Create the working directory
mkdir /etc/ceph
#Generate the initial configuration
cd /etc/ceph
ceph-deploy new controller01
vim /etc/ceph/ceph.conf
[global]
fsid = f57adcaa-2eec-42d1-9154-4608dae9e416
mon_initial_members = controller01
mon_host = 192.168.89.10
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.89.0/24
cluster network = 192.168.90.0/24
#Install the ceph packages on all nodes (--no-adjust-repos leaves the existing yum repos untouched)
ceph-deploy install --no-adjust-repos controller01 controller02 controller03 compute01 compute02 compute03
#Initialize the mon and create the mgr daemons
ceph-deploy mon create-initial
ceph-deploy mgr create controller01 controller02 controller03
#Sync the configuration to all ceph nodes
ceph-deploy admin controller01 controller02 controller03 compute01 compute02 compute03
#Check the quorum status
ceph quorum_status --format json-pretty
#List the available disks on each node
ceph-deploy disk list controller01 controller02 controller03
ceph-deploy disk list compute01 compute02 compute03
#Zap (wipe) the disks
ceph-deploy disk zap controller01 /dev/sdb
ceph-deploy disk zap controller02 /dev/sdb
ceph-deploy disk zap controller03 /dev/sdb
ceph-deploy disk zap compute01 /dev/sdb
ceph-deploy disk zap compute02 /dev/sdb
ceph-deploy disk zap compute03 /dev/sdb
#Create the OSDs
ceph-deploy osd create --data /dev/sdb controller01
ceph-deploy osd create --data /dev/sdb controller02
ceph-deploy osd create --data /dev/sdb controller03
ceph-deploy osd create --data /dev/sdb compute01
ceph-deploy osd create --data /dev/sdb compute02
ceph-deploy osd create --data /dev/sdb compute03
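Since every node contributes the same /dev/sdb device, the zap and osd-create steps above can also be written as a single loop (equivalent shorthand for the commands above):
for host in controller01 controller02 controller03 compute01 compute02 compute03; do
    ceph-deploy disk zap $host /dev/sdb
    ceph-deploy osd create --data /dev/sdb $host
done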
#Check the cluster health (1 mon, 3 mgr, 6 osd)
[root@controller01 ceph]# ceph -s
  cluster:
    id:     f57adcaa-2eec-42d1-9154-4608dae9e416
    health: HEALTH_OK
  services:
    mon: 1 daemons, quorum controller01
    mgr: controller01(active), standbys: controller02, controller03
    osd: 6 osds: 6 up, 6 in
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   6.02GiB used, 594GiB / 600GiB avail
    pgs:
#Create the pools and the keyrings for each client
ceph osd pool create images 64
ceph osd pool create volumes 64
ceph osd pool create backups 64
ceph osd pool create vms 64
ceph auth get-or-create client.glance | ssh controller01 tee /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh controller01 tee /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh controller01 tee /etc/ceph/ceph.client.cinder-backup.keyring
ceph auth get-or-create client.nova | ssh controller01 tee /etc/ceph/ceph.client.nova.keyring
#Grant capabilities to each client
ceph auth caps client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=backups'
ceph auth caps client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=backups'
ceph auth caps client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=backups'
ceph auth caps client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=backups'
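To confirm the keyrings and caps took effect, each client can be exercised against its pool (a minimal check; listing an empty pool simply returns nothing):
#Show the generated key and caps for one of the clients
ceph auth get client.glance
#Confirm the glance user can access the images pool
rbd ls images --id glance --keyring /etc/ceph/ceph.client.glance.keyring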
Ceph deployment is complete.
## Back on the deploy Node
#Create the config directories
mkdir -pv /etc/kolla/config/{cinder/{cinder-volume,cinder-backup},glance,nova}
mkdir: created directory "/etc/kolla/config"
mkdir: created directory "/etc/kolla/config/cinder"
mkdir: created directory "/etc/kolla/config/cinder/cinder-volume"
mkdir: created directory "/etc/kolla/config/cinder/cinder-backup"
mkdir: created directory "/etc/kolla/config/glance"
mkdir: created directory "/etc/kolla/config/nova"
#Copy the kolla example configuration and inventories into place
cp /usr/share/kolla-ansible/etc_examples/kolla/* /etc/kolla
cp /usr/share/kolla-ansible/ansible/inventory/* /root/
#ls -l /etc/kolla
[root@deploy kolla]# ls /etc/kolla/
config globals.yml passwords.yml
#ls -l /root
multinode all-in-one
#Edit /root/multinode
#Control nodes
[control]
controller[01:03]
#Network nodes
[network]
controller[01:03]
#Compute nodes
[compute]
compute[01:03]
#Monitoring node
[monitoring]
controller01
#Storage node
[storage]
controller01
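Before editing globals.yml, it is worth confirming that the multinode inventory resolves and that ansible can reach every host through it (note this inventory is separate from the /etc/ansible/hosts file created earlier):
ansible -i /root/multinode all -m ping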
#Edit /etc/kolla/globals.yml
kolla_base_distro: "centos"
kolla_install_type: "source"
openstack_release: "stein"
kolla_internal_vip_address: "192.168.89.100"
network_interface: "ens33"
neutron_external_interface: "ens35"
keepalived_virtual_router_id: "51"
enable_haproxy: "yes"
enable_ceph: "no"
enable_cinder: "yes"
enable_cinder_backup: "yes"
enable_fluentd: "yes"
enable_openstack_core: "yes"
glance_backend_ceph: "yes"
glance_backend_file: "no"
glance_enable_rolling_upgrade: "no"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
nova_compute_virt_type: "kvm"
ironic_dnsmasq_dhcp_range:
tempest_image_id:
tempest_flavor_ref_id:
tempest_public_network_id:
tempest_floating_network_name:
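kolla_internal_vip_address must be an unused address on the network_interface subnet; keepalived brings it up on the active controller once haproxy is deployed. A quick pre-check (no reply is the expected result before deployment):
#If something answers here, pick a different VIP
ping -c 2 192.168.89.100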
#Run kolla-genpwd to generate the service passwords
kolla-genpwd
vim /etc/kolla/passwords.yml
#Change keystone_admin_password to the dashboard login password you want
#Copy the ceph configuration files to the deploy node
scp controller01:/etc/ceph/ceph.conf /etc/kolla/config/cinder/ceph.conf
scp controller01:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume/ceph.client.cinder.keyring
scp controller01:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup/ceph.client.cinder.keyring
scp controller01:/etc/ceph/ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup/ceph.client.cinder-backup.keyring
scp controller01:/etc/ceph/ceph.client.glance.keyring /etc/kolla/config/glance/ceph.client.glance.keyring
scp controller01:/etc/ceph/ceph.conf /etc/kolla/config/nova/ceph.conf
scp controller01:/etc/ceph/ceph.client.nova.keyring /etc/kolla/config/nova/ceph.client.nova.keyring
scp controller01:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/nova/ceph.client.cinder.keyring
#Generate the service configuration files (kolla-ansible will copy these into the docker containers)
export cinder_rbd_secret_uuid=$(grep cinder_rbd_secret_uuid /etc/kolla/passwords.yml | awk '{print $2}')
tee /etc/kolla/config/cinder/cinder-volume.conf << EOF
[DEFAULT]
enabled_backends=rbd
[rbd]
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=cinder
backend_host=rbd:volumes
rbd_pool=volumes
volume_backend_name=rbd
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = $cinder_rbd_secret_uuid
EOF
cp /etc/kolla/config/cinder/cinder-volume.conf /etc/kolla/config/cinder/cinder-volume/
tee /etc/kolla/config/cinder/cinder-backup/cinder-backup.conf << EOF
[DEFAULT]
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder
backup_ceph_chunk_size = 134217728
backup_ceph_pool=backups
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
EOF
tee /etc/kolla/config/glance/glance-api.conf << EOF
[DEFAULT]
show_multiple_locations = true
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
EOF
cp /etc/kolla/config/cinder/ceph.conf /etc/kolla/config/glance/
tee /etc/kolla/config/nova/nova-compute.conf << EOF
[DEFAULT]
compute_driver=libvirt.LibvirtDriver
[libvirt]
images_rbd_pool=vms
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova
disk_cachemodes="network=writeback"
inject_password = false
libvirt_inject_partition = -2
inject_key = false
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
EOF
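Before deploying, a quick sanity check that all the generated files and keyrings are in place, and that the secret UUID was actually substituted, can save a failed run:
#List everything kolla-ansible will merge into the containers
find /etc/kolla/config -type f
#The UUID from passwords.yml should appear here, not an empty value
grep rbd_secret_uuid /etc/kolla/config/cinder/cinder-volume/cinder-volume.conf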
## Deploy
cd /root
#Install the deployment dependencies on all hosts
kolla-ansible -i /root/multinode bootstrap-servers
#Run the pre-deployment checks
kolla-ansible -i /root/multinode prechecks
#Pull the images (note: if a pull fails, change the DNS server, e.g. DNS1=223.6.6.6, and retry a few times)
kolla-ansible -i /root/multinode pull
#Run the actual deployment
kolla-ansible -i /root/multinode deploy
#Generate admin-openrc.sh
kolla-ansible -i /root/multinode post-deploy
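post-deploy writes /etc/kolla/admin-openrc.sh. Once the openstack client is installed (see the pip notes at the end of this section), the deployment can be smoke-tested with a few read-only calls (a sketch; the exact services listed depend on what globals.yml enabled):
source /etc/kolla/admin-openrc.sh
openstack service list
openstack compute service list
openstack volume service list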
## Configure the Network
#Download a test image and configure the floating IP address pool
vim /usr/share/kolla-ansible/init-runonce
#line 19: EXT_NET_CIDR='192.168.89.0/24'
#line 20: EXT_NET_RANGE='start=192.168.89.180,end=192.168.89.198'
#line 21: EXT_NET_GATEWAY='192.168.89.2'
#Then run it
/usr/share/kolla-ansible/init-runonce
#Error: no module named queue
#Edit the following two files:
vim /usr/lib/python2.7/site-packages/openstack/utils.py
vim /usr/lib/python2.7/site-packages/openstack/cloud/openstackcloud.py
#and add the following at the top of each file:
import sys
if sys.version > '3':
import queue
else:
import Queue as queue
#Error: PyYAML 3.10 is already installed, but a newer PyYAML is required:
Found existing installation: PyYAML 3.10
Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
pip install --ignore-installed PyYAML    #ignore the already-installed PyYAML and install over it
pip install python-openstackclient
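With the client working and init-runonce finished, a test instance can be launched against the resources that script creates (the names demo-net, m1.tiny, cirros, and public1 are the defaults init-runonce sets up; adjust if yours differ):
source /etc/kolla/admin-openrc.sh
openstack server create --image cirros --flavor m1.tiny --network demo-net demo1
openstack server list
#Allocate a floating IP from the 192.168.89.180-198 range configured above
openstack floating ip create public1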
If you spot any mistakes, please point them out; let's discuss and improve together.
A related post showing these errors: https://blog.csdn.net/zongzw/article/details/106952948