openstack单节点部署流程U版
 
 
 '''该流程适用于 RedHat 与 CentOS'''
 
基本环节配置
1.网络环境(所有节点文件相同)
对于控制节点编辑 /etc/sysconfig/network-scripts/ifcfg-(网卡名)
 DEVICE=网卡名
 TYPE=Ethernet
 ONBOOT="yes"
 BOOTPROTO="none"
编辑 /etc/hosts,添加当前节点内网ip
 # controller
 10.0.0.11(当前节点ip) controllerchrony 网络时间同步协议
 yum install chrony编辑/etc/chrony.conf,启动时间同步
 server 服务器ip iburst为使其他节点能够连接, 编辑/etc/chrony.conf,配置子网
 例子:10.0.0.0/24验证同步
 chronyc sources添加仓库(当安装 Ussuri release时)
 yum install centos-release-openstack-ussuri
 yum config-manager --set-enabled PowerTools安装OpenStack client
 yum upgrade
 yum install python3-openstackclient安装数据库
 yum install mariadb mariadb-server python2-PyMySQL创建并编辑/etc/my.cnf.d/openstack.cnf
 [mysqld]
 bind-address = 10.0.0.11(当前节点ip)default-storage-engine = innodb
 innodb_file_per_table = on
 max_connections = 4096
 collation-server = utf8_general_ci
 character-set-server = utf8设置数据库并重启
 systemctl enable mariadb.service
 systemctl start mariadb.service设置数据库root密码
 mysql_secure_installation安装消息队列
 yum install rabbitmq-server设置消息队列服务并启动
 systemctl enable rabbitmq-server.service
 systemctl start rabbitmq-server.service消息添加用户设置密码
 rabbitmqctl add_user openstack (密码)消息队列权限设置
 rabbitmqctl set_permissions openstack ".*" ".*" ".*"
安装缓存
 yum install memcached python3-memcached
 编辑 /etc/sysconfig/memcached
 OPTIONS="-l 127.0.0.1,::1,controller"设置缓存并启动
 systemctl enable memcached.service
 systemctl start memcached.service安装key-value 数据库etcd
 yum install etcdvi /etc/etcd/etcd.conf
 #[Member]
 ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
 ETCD_LISTEN_PEER_URLS="http://10.0.0.11:2380"
 ETCD_LISTEN_CLIENT_URLS="http://10.0.0.11:2379"
 ETCD_NAME="controller"
 #[Clustering]
 ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
 ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
 ETCD_INITIAL_CLUSTER="controller=http://10.0.0.11:2380"
 ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
 ETCD_INITIAL_CLUSTER_STATE="new"
设置服务并启动
 systemctl enable etcd
 systemctl start etcd#关闭SELinux
 setenforce 0
 sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
#关闭防火墙
 systemctl stop firewalld && systemctl disable firewalldKeystone 验证服务
创建数据库
 mysql -u root -p
 MariaDB [(none)]> CREATE DATABASE keystone;
 创建用户并设置权限
 MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' 
 IDENTIFIED BY 'KEYSTONE_DBPASS';
 MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' 
 IDENTIFIED BY 'KEYSTONE_DBPASS';
安装keystone和所需的包
 yum install openstack-keystone httpd mod_wsgi
 修改/etc/keystone/keystone.conf[database]
 # …
 connection = mysql+pymysql://keystone:(密码)@controller/keystone
 [token]
 # …
 provider = fernet补充身份服务数据库
 su -s /bin/sh -c "keystone-manage db_sync" keystone
初始化Fernet密钥存储库:
 keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
 keystone-manage credential_setup --keystone-user keystone --keystone-group keystone启动验证服务
 keystone-manage bootstrap --bootstrap-password 123456 \
 --bootstrap-admin-url http://controller:5000/v3/ \
 --bootstrap-internal-url http://controller:5000/v3/ \
 --bootstrap-public-url http://controller:5000/v3/ \
 --bootstrap-region-id RegionOne
配置Apache HTTP server
 修改/etc/httpd/conf/httpd.conf
 ServerName controller创建链接
 ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/设置并启动服务
 systemctl enable httpd.service
 systemctl start httpd.service创建管理账号
 export OS_USERNAME=admin
 export OS_PASSWORD=123456
 export OS_PROJECT_NAME=admin
 export OS_USER_DOMAIN_NAME=Default
 export OS_PROJECT_DOMAIN_NAME=Default
 export OS_AUTH_URL=http://controller:5000/v3
 export OS_IDENTITY_API_VERSION=3Nova 计算资源管理
创建数据库
 mysql -u root -p
 CREATE DATABASE nova_api;
 CREATE DATABASE nova;
 CREATE DATABASE nova_cell0;
 GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' 
 IDENTIFIED BY '123456';
 GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' 
 IDENTIFIED BY '123456';
 GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' 
 IDENTIFIED BY '123456';
 GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' 
 IDENTIFIED BY '123456';
 GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' 
 IDENTIFIED BY '123456';
 GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' 
 IDENTIFIED BY '123456';
安装nova相关包
 yum install openstack-nova-api openstack-nova-conductor \
 openstack-nova-novncproxy openstack-nova-scheduler编辑/etc/nova/nova.conf文件并完成以下操作:
 [DEFAULT]
 # …
 enabled_apis = osapi_compute,metadata[api_database]
 # …
 connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api[database]
 # …
 connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova[DEFAULT]
 # …
 transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/[api]
 # …
 auth_strategy = keystone[keystone_authtoken]
 # …
 www_authenticate_uri = http://controller:5000/
 auth_url = http://controller:5000/
 memcached_servers = controller:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = nova
 password = NOVA_PASS[DEFAULT]
 # …
 my_ip = ip修改/etc/nova/nova.conf
 [vnc]
 enabled = true
 # …
 server_listen = $my_ip
 server_proxyclient_address = $my_ip[glance]
 # …
 api_servers = http://controller:9292[oslo_concurrency]
 # …
 lock_path = /var/lib/nova/tmp填充nova-api数据库:
 su -s /bin/sh -c "nova-manage api_db sync" nova
 注册cell0数据库:
 su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
 创建cell1单元格:
 su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
 填充nova数据库:
 su -s /bin/sh -c "nova-manage db sync" nova
 验证nova cell0和cell1是否正确注册:
 su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
启动Compute服务并将其配置为在系统启动时启动:
 # systemctl enable \
 openstack-nova-api.service \
 openstack-nova-scheduler.service \
 openstack-nova-conductor.service \
 openstack-nova-novncproxy.service
 # systemctl start \
 openstack-nova-api.service \
 openstack-nova-scheduler.service \
 openstack-nova-conductor.service \
 openstack-nova-novncproxy.service验证
 . admin-openrc
 openstack compute service list
 openstack catalog list
 openstack image list
 nova-status upgrade check
 [placement]
 # …
 region_name = RegionOne
 project_domain_name = Default
 project_name = service
 auth_type = password
 user_domain_name = Default
 auth_url = http://controller:5000/v3
 username = placement
 password = PLACEMENT_PASSNeutron 网络
官方文档是使用的桥接,比较简单,但是只是几个基本组件练手是没有问题的
 yum install openstack-neutron openstack-neutron-ml2 
 openstack-neutron-linuxbridge ebtables编辑 /etc/neutron/neutron.conf
 [database]
 connection = mysql+pymysql://neutron:密码@controller/neutron[DEFAULT]
 core_plugin = ml2
 service_plugins = router
 # 注意:自助网络需要启用 router 插件(配合后面的 neutron-l3-agent);若只使用提供者网络则留空
 transport_url = rabbit://openstack:密码@controller
 auth_strategy = keystone
 notify_nova_on_port_status_changes = true
 notify_nova_on_port_data_changes = true[keystone_authtoken]
 www_authenticate_uri = http://controller:5000
 auth_url = http://controller:5000
 memcached_servers = controller:11211
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 project_name = service
 username = neutron
 password = 密码[nova]
 auth_url = http://controller:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = nova
 password = 密码[oslo_concurrency]
 lock_path = /var/lib/neutron/tmp编辑/etc/neutron/plugins/ml2/ml2_conf.ini
 [ml2]
 type_drivers = flat,vlan,vxlan
 tenant_network_types = vxlan
 mechanism_drivers = linuxbridge,l2population
 extension_drivers = port_security[ml2_type_flat]
 flat_networks = provider
 [ml2_type_vxlan]
 vni_ranges = 1:1000[securitygroup]
 enable_ipset = true编辑 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
 [linux_bridge]
 physical_interface_mappings = provider:网卡名[vxlan]
 enable_vxlan = true
 local_ip = OVERLAY_INTERFACE_IP_ADDRESS
 l2_population = true[securitygroup]
 enable_security_group = true
 firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver在文件/etc/sysctl.conf后追加
 net.bridge.bridge-nf-call-iptables=1
 net.bridge.bridge-nf-call-ip6tables=1
编辑/etc/neutron/l3_agent.ini
 [DEFAULT]
 interface_driver = linuxbridge
编辑/etc/neutron/dhcp_agent.ini(原文此处重复的 interface_driver 实为 dhcp_agent.ini 的配置)
 [DEFAULT]
 interface_driver = linuxbridge
 dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
 enable_isolated_metadata = true
编辑 /etc/neutron/metadata_agent.ini
 [DEFAULT]
 # …
 nova_metadata_host = controller
 metadata_proxy_shared_secret = 密钥编辑 /etc/nova/nova.conf
 [neutron]
 # …
 auth_url = http://controller:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = neutron
 password = 123456
 service_metadata_proxy = true
 metadata_proxy_shared_secret = 密钥创建链接
 ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini合并数据库
 su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
 --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
启动服务
 systemctl restart openstack-nova-api.servicesystemctl enable neutron-server.service 
 neutron-linuxbridge-agent.service neutron-dhcp-agent.service 
 neutron-metadata-agent.service
 systemctl start neutron-server.service 
 neutron-linuxbridge-agent.service neutron-dhcp-agent.service 
 neutron-metadata-agent.service
 systemctl enable neutron-l3-agent.service
 systemctl start neutron-l3-agent.service验证
 openstack network agent listplacement
创建数据库
 mysql -u root -p
 CREATE DATABASE placement;
 GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' 
 IDENTIFIED BY 'PLACEMENT_DBPASS';
 GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' 
 IDENTIFIED BY 'PLACEMENT_DBPASS';
创建服务 用户 端点
 . admin-openrc
 openstack user create --domain default --password-prompt placement
 openstack role add --project service --user placement admin
 openstack service create --name placement \
 --description "Placement API" placement
 openstack endpoint create --region RegionOne \
 placement public http://controller:8778
 openstack endpoint create --region RegionOne \
 placement internal http://controller:8778
 openstack endpoint create --region RegionOne \
 placement admin http://controller:8778
安装包
 yum install openstack-placement-api修改 /etc/placement/placement.conf
 [placement_database]
 # …
 connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement[api]
 # …
 auth_strategy = keystone[keystone_authtoken]
 # …
 auth_url = http://controller:5000/v3
 memcached_servers = controller:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = placement
 password = PLACEMENT_PASS
编辑 /etc/httpd/conf.d/00-placement-api.conf
 # 此处是bug,必须添加下面的配置来启用对placement api的访问,否则在访问apache的api时会报403;添加在文件的最后即可
 <Directory /usr/bin>
   <IfVersion >= 2.4>
     Require all granted
   </IfVersion>
   # apache版本低于2.4时:允许apache访问/usr/bin目录,否则/usr/bin/placement-api将不允许被访问
   <IfVersion < 2.4>
     Order allow,deny
     Allow from all
   </IfVersion>
 </Directory>
合并数据库
 su -s /bin/sh -c "placement-manage db sync" placement
 重启服务
 systemctl restart httpd验证
 . admin-openrc
 placement-status upgrade check
 pip install osc-placement
 openstack --os-placement-api-version 1.2 resource class list --sort-column name
 openstack --os-placement-api-version 1.6 trait list --sort-column nameglance 镜像服务
创建数据库
 CREATE DATABASE glance;
 创建用户并设置权限
 GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' 
 IDENTIFIED BY 'GLANCE_DBPASS';
 GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' 
 IDENTIFIED BY 'GLANCE_DBPASS';
获取管理凭证,进入admin-only CLI命令:
 . admin-openrc
 创建glance用户:
 openstack user create --domain default --password-prompt glance
 管理角色添加到glane用户和服务项目:
 openstack role add --project service --user glance admin创建glance服务实例
 openstack service create --name glance \
 --description "OpenStack Image" image
创建image服务API端点
 openstack endpoint create --region RegionOne \
 image public http://controller:9292
 openstack endpoint create --region RegionOne \
 image internal http://controller:9292
 openstack endpoint create --region RegionOne \
 image admin http://controller:9292
安装包
 yum install openstack-glance
 编辑/etc/glance/glance-api.conf
 [database]
 # …
 connection = mysql+pymysql://glance:镜像服务数据库的密码@controller/glance
 在[keystone_authtoken]和[paste_deploy]部分,配置身份服务访问
 [keystone_authtoken]
 # …
 www_authenticate_uri = http://controller:5000
 auth_url = http://controller:5000
 memcached_servers = controller:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = glance
 password = 密码
 [paste_deploy]
 # …
 flavor = keystone
 [glance_store]# …
 stores = file,http
 default_store = file
 filesystem_store_datadir = /var/lib/glance/images/迁移镜像服务数据库
 su -s /bin/sh -c "glance-manage db_sync" glance
设置服务并启动
 systemctl enable openstack-glance-api.service
 systemctl start openstack-glance-api.service获取CLi管理凭证
 . admin-openrc
 下载图片
 wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
 上传图片到图片服务使用QCOW2磁盘格式,裸露的容器格式,和公共可见性所以所有的项目都可以访问:
 glance image-create --name "cirros" \
 --file cirros-0.4.0-x86_64-disk.img \
 --disk-format qcow2 --container-format bare \
 --visibility=public
 查看图片
 glance image-listhorizon 配置dashboard
 安装包
 yum install openstack-dashboard编辑 /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
 # 设置允许访问的ip,可以设置为['*']
 ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
 # 配置缓存
 SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
 CACHES = {
 'default': {
 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
 'LOCATION': 'controller:11211',
 }
 }
 # 开启v3 api认证
 # 注意:RPM 包安装的 keystone 监听 5000 端口,应使用 "http://%s:5000/v3";官方示例中的 /identity/v3 路径适用于 devstack 部署
 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
 OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
 # 配置api版本
 OPENSTACK_API_VERSIONS = {
 "identity": 3,
 "image": 2,
 "volume": 3,
 }
 OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
 # 有两种网络,1提供者网络2自助网络,用提供者网络无法使用三层网络服务
 # 我们前面使用的自助网络
 # 注意:下面全部为 False 的取值是提供者网络的配置;既然前面部署的是自助网络(vxlan+l3-agent),
 # 这些选项应保持默认值(True),尤其是 enable_router,否则界面上无法管理路由器
 OPENSTACK_NEUTRON_NETWORK = {
 …
 'enable_router': False,
 'enable_quotas': False,
 'enable_distributed_router': False,
 'enable_ha_router': False,
 'enable_lb': False,
 'enable_firewall': False,
 'enable_vpn': False,
 'enable_fip_topology_check': False,
 }
 # 设置时区,使用 IANA 时区名;上海为 "Asia/Shanghai"
 TIME_ZONE = "Asia/Shanghai"
编辑/etc/httpd/conf.d/openstack-dashboard.conf,如果不包括下面一行就追加在后面
 WSGIApplicationGroup %{GLOBAL}# 重启服务
 systemctl restart httpd.service memcached.service
在那之前需要个身份认证,前面的export的身份信息在重启后就消失了,需要一个adminrc文件,之后重启机器后执行 source adminrc
 adminrc 中的内容,之前用到的export语句,去掉export保留后面内容就行了如果是新手的话,试着学会用下面命令查看服务是否正确运行,查看日志是否报错
 systemctl status openstack-xxx服务
 vi /var/log/nova/nova-xxx.log 查看日志 shirft+g 跳到最底部