一 摘要
前期学习,部署了一套openstack mitaka 版本,基于rpm 包手工部署。控制节点实现三台集群,计算节点、存储节点共用一台物理机。
二 环境信息
(一)操作系统版本
集群里所有节点版本相同
[root@controller1 ~]# cat /etc/centos-release
CentOS Linux release 7.7.1908 (Core)
[root@controller1 ~]# uname -a
Linux controller1 3.10.0-1062.el7.x86_64 #1 SMP Wed Aug 7 18:08:02 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
[root@controller1 ~]#
(二)openstack 版本
产品版本是openstack mitaka,各rpm 包版本如下
[root@controller1 ~]# openstack --version
openstack 2.3.1
[root@controller1 ~]# rpm -qa | grep openstack
openstack-keystone-9.3.0-1.el7.noarch
openstack-nova-api-13.1.4-1.el7.noarch
openstack-nova-console-13.1.4-1.el7.noarch
openstack-neutron-ml2-8.4.0-1.el7.noarch
python-django-openstack-auth-2.2.1-1.el7.noarch
openstack-utils-2017.1-1.el7.noarch
openstack-nova-scheduler-13.1.4-1.el7.noarch
openstack-neutron-8.4.0-1.el7.noarch
openstack-dashboard-9.1.2-1.el7.noarch
openstack-nova-common-13.1.4-1.el7.noarch
openstack-glance-12.0.0-1.el7.noarch
openstack-neutron-linuxbridge-8.4.0-1.el7.noarch
openstack-nova-compute-13.1.4-1.el7.noarch
python-openstackclient-2.3.1-2.el7.noarch
openstack-nova-conductor-13.1.4-1.el7.noarch
openstack-neutron-common-8.4.0-1.el7.noarch
python2-openstacksdk-0.8.3-1.el7.noarch
openstack-nova-novncproxy-13.1.4-1.el7.noarch
[root@controller1 ~]#
(三)待扩容物理机基本信息
系统用途 | 品牌 | 型号 | 序列号 | IP | 配置 |
compute20.yun.digital.com | Inspur | SA5212M5 | XXXXXXX | 10.3.176.44 | CPU:Intel Silver 4210 2.2G * 2; 内存:总256G(32G * 8);硬盘:240G SSD * 2 、8T/SATA/7.2K * 6;网卡:X710 10G * 1 |
三 计算节点扩容
(一)安装操作系统
3.1.1 设置raid
系统盘 设置为raid1;数据盘设置为raid5;
3.1.2 安装系统
安装系统时,只需把home 目录设置为50G,swap、boot 等按推荐设置,其余的放到根目录;
[root@localhost ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
devtmpfs 126G 0 126G 0% /dev
tmpfs 126G 0 126G 0% /dev/shm
tmpfs 126G 11M 126G 1% /run
tmpfs 126G 0 126G 0% /sys/fs/cgroup
/dev/mapper/centos-root 160G 4.0G 157G 3% /
/dev/sda2 1014M 168M 847M 17% /boot
/dev/sda1 200M 12M 189M 6% /boot/efi
/dev/mapper/centos-home 50G 38M 50G 1% /home
tmpfs 26G 4.0K 26G 1% /run/user/42
tmpfs 26G 28K 26G 1% /run/user/1000
tmpfs 26G 0 26G 0% /run/user/0
[root@localhost ~]#
(二)基础环境准备
3.2.1 内网dns 服务器上配置计算节点域名
在内网dns 服务器上配置该机器dns
修改10.3.170.32 dns 服务器上/var/named/digital.com.zone
[root@10-3-170-32 named]# cp digital.com.zone digital.com.zone.bak.20210220
[root@10-3-170-32 named]# vim digital.com.zone
[root@10-3-170-32 named]# pwd
/var/named
[root@10-3-170-32 named]#
compute20.yun IN A 10.3.176.44
重启dns 服务器
[root@10-3-170-32 named]# systemctl restart named
[root@10-3-170-32 named]# systemctl status named
● named.service - Berkeley Internet Name Domain (DNS)
Loaded: loaded (/usr/lib/systemd/system/named.service; enabled; vendor preset: disabled)
Active: active (running) since 六 2021-02-20 10:39:00 CST; 6s ago
Process: 72600 ExecStop=/bin/sh -c /usr/sbin/rndc stop > /dev/null 2>&1 || /bin/kill -TERM $MAINPID (code=exited, status=0/SUCCESS)
Process: 82828 ExecReload=/bin/sh -c /usr/sbin/rndc reload > /dev/null 2>&1 || /bin/kill -HUP $MAINPID (code=exited, status=0/SUCCESS)
Process: 72620 ExecStart=/usr/sbin/named -u named -c ${NAMEDCONF} $OPTIONS (code=exited, status=0/SUCCESS)
Process: 72616 ExecStartPre=/bin/bash -c if [ ! "$DISABLE_ZONE_CHECKING" == "yes" ]; then /usr/sbin/named-checkconf -z "$NAMEDCONF"; else echo "Checking of zone files is disabled"; fi (code=exited, status=0/SUCCESS)
Main PID: 72621 (named)
Memory: 150.7M
CGroup: /system.slice/named.service
└─72621 /usr/sbin/named -u named -c /etc/named.conf
验证:
[root@10-3-170-32 named]# ping compute20.yun.digital.com
PING compute20.yun.digital.com (10.3.176.44) 56(84) bytes of data.
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=1 ttl=63 time=0.121 ms
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=2 ttl=63 time=0.098 ms
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=3 ttl=63 time=0.089 ms
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=4 ttl=63 time=0.081 ms
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=5 ttl=63 time=0.119 ms
64 bytes from 10.3.176.44 (10.3.176.44): icmp_seq=6 ttl=63 time=0.087 ms
3.2.2 计算节点配置dns
计算节点配置内网dns 服务器地址。
centos7.7 配置dns,我用的是修改/etc/NetworkManager/NetworkManager.conf
[main]
plugins=ifcfg-rh
dns=none
[logging]
#level=DEBUG
#domains=ALL
新增/etc/resolv.conf
# Generated by NetworkManager
nameserver 10.3.157.201
重启NetworkManager
整个操作封装在 ansible 脚本里。
[dev@10-3-170-32 base]$ ansible-playbook modifydns.yml
3.2.3 计算节点更新内网yum 源
更新内网操作系统yum 源
[dev@10-3-170-32 base]$ ansible-playbook updateyum.yml
更新openstack M 版内网yum 源
[dev@10-3-170-32 base]$ ansible-playbook updateopenstackyum.yml
验证:
[root@localhost yum.repos.d]# ll
总用量 12
-rw-r--r--. 1 root root 501 2月 20 10:55 CentOS-77.repo
-rw-r--r--. 1 root root 90 2月 20 10:55 docker.repo
-rw-r--r--. 1 root root 94 2月 20 10:58 openstack.repo
[root@localhost yum.repos.d]# pwd
/etc/yum.repos.d
[root@localhost yum.repos.d]#
3.2.4 关闭防火墙及selinux
ansible脚本封装
[dev@10-3-170-32 base]$ ansible-playbook closefirewalldandselinux.yml
3.2.5 配置时间服务器
[dev@10-3-170-32 base]$ ansible-playbook modifychronyclient.yml
3.2.6 修改计算节点名称
修改完名称,并重启下机器,至此 准备工作已完成。
[root@localhost yum.repos.d]# hostnamectl set-hostname compute20.yun.digital.com
[root@localhost yum.repos.d]# reboot
(三)openstack 服务安装及配置文件
3.3.1 安装 OpenStack 客户端及安全策略管理工具
[root@compute20 ~]# yum install python-openstackclient
[root@compute20 ~]# yum install openstack-selinux
3.3.2 安装nova-compute
[root@compute20 ~]# yum install openstack-nova-compute -y
[root@compute20 ~]# yum install openstack-utils.noarch -y
备份原先nova 配置文件
[root@compute20 ~]# cp /etc/nova/nova.conf{,.bak}
[root@compute20 ~]# grep -Ev '^$|^#' /etc/nova/nova.conf.bak >/etc/nova/nova.conf
新增配置
注意:DEFAULT 段的 my_ip 需改为本计算节点的管理 IP(本例为 10.3.176.44)
openstack-config --set /etc/nova/nova.conf DEFAULT cpu_allocation_ratio 16.0
openstack-config --set /etc/nova/nova.conf DEFAULT ram_allocation_ratio 1.5
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller1.yun.digital.com:5672,openstack:RABBIT_PASS@controller2.yun.digital.com:5672,openstack:RABBIT_PASS@controller3.yun.digital.com:5672
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 10.3.176.44
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller0.yun.digital.com:9292
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller0.yun.digital.com:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller0.yun.digital.com:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller1.yun.digital.com:11211,controller2.yun.digital.com:11211,controller3.yun.digital.com:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller0.yun.digital.com:6080/vnc_auto.html
openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
openstack-config --set /etc/nova/nova.conf cache enabled true
openstack-config --set /etc/nova/nova.conf cache memcache_servers controller1.yun.digital.com:11211,controller2.yun.digital.com:11211,controller3.yun.digital.com:11211
3.3.3 安装neutron-linuxbridge-agent
[root@compute20 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y
备份配置文件/etc/neutron/neutron.conf
[root@compute20 ~]# cp /etc/neutron/neutron.conf{,.bak}
[root@compute20 ~]# grep '^[a-zA-Z\[]' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
[root@compute20 ~]#
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller1.yun.digital.com:5672,openstack:RABBIT_PASS@controller2.yun.digital.com:5672,openstack:RABBIT_PASS@controller3.yun.digital.com:5672
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller0.yun.digital.com:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller0.yun.digital.com:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller1.yun.digital.com:11211,controller2.yun.digital.com:11211,controller3.yun.digital.com:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
备份配置文件 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@compute20 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@compute20 ~]# grep '^[a-zA-Z\[]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@compute20 ~]#
linux_bridge physical_interface_mappings provider:eno1
eno1 要改为您对应的网卡名称
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:eno1
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
再次修改 nova 配置文件
先备份
[root@compute20 ~]# cp /etc/nova/nova.conf{,.bak`date +"%Y%m%d%H%M%S"`}
[root@compute20 ~]#
openstack-config --set /etc/nova/nova.conf neutron url http://controller0.yun.digital.com:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller0.yun.digital.com:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
3.3.4 启动服务并设置开机启动
[root@compute20 ~]# systemctl start libvirtd openstack-nova-compute neutron-linuxbridge-agent
[root@compute20 ~]# systemctl enable libvirtd openstack-nova-compute neutron-linuxbridge-agent
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@compute20 ~]#
3.3.5 验证
控制节点验证
[root@controller1 ~]# source admin-openrc
[root@controller1 ~]# nova service-list | grep compute20
| 335 | nova-compute | compute20.yun.digital.com | nova | enabled | up | 2021-02-20T05:43:01.000000 | - |
[root@controller1 ~]#
也可以到该计算节点执行
[root@compute20 ~]# nova service-list | grep compute20
| 335 | nova-compute | compute20.yun.digital.com | nova | enabled | up | 2021-02-20T05:42:11.000000 | - |
[root@compute20 ~]#