1.
rpm -ivh https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
yum clean all
yum install -y epel-release salt-master
vim /etc/salt/master
修改以下内容:
interface: 192.168.1.10
auto_accept: True
systemctl start salt-master
systemctl enable salt-master
2.
rpm -ivh https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
yum clean all
yum -y install salt-minion
vim /etc/salt/minion
修改以下内容:
master: 192.168.1.10
id: node1
末尾添加
schedule:
highstate:
function: state.highstate
minutes: 10
systemctl start salt-minion
systemctl enable salt-minion
3.
rpm -ivh https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
yum clean all
yum -y install salt-minion
vim /etc/salt/minion
修改以下内容:
master: 192.168.1.10
id: node2
末尾添加
schedule:
highstate:
function: state.highstate
minutes: 10
systemctl start salt-minion
systemctl enable salt-minion
SaltStack 批量部署并配置 Nginx
角色      主机名                   IP地址          组名        Nginx根目录
master    master.saltstack.com     192.168.9.233   -           -
minion1   web01.saltstack.com      192.168.9.234   web01group  /data
minion2   web02.saltstack.com      192.168.9.235   web02group  /www
master:
[root@localhost ~]# hostnamectl set-hostname master.saltstack.com
[root@localhost ~]# bash
minion1:
[root@localhost ~]# hostnamectl set-hostname web01.saltstack.com
[root@localhost ~]# bash
minion2:
[root@localhost ~]# hostnamectl set-hostname web02.saltstack.com
[root@localhost ~]# bash
全部主机:
cat << EOF >> /etc/hosts
192.168.9.233 master.saltstack.com
192.168.9.234 web01.saltstack.com
192.168.9.235 web02.saltstack.com
EOF
master:
[root@master ~]# yum install -y epel-release
[root@master ~]# yum install -y salt-master
[root@master ~]# vim /etc/salt/master
查找并修改以下内容:
interface: 192.168.9.233
auto_accept: True
file_roots:
base:
- /srv/salt/
nodegroups:
web01group: 'web01.saltstack.com'
web02group: 'web02.saltstack.com'
pillar_opts: True
pillar_roots:
base:
- /srv/pillar
主控端主要修改内容包括:
[root@master ~]# cat /etc/salt/master | grep -v "^$" | grep -v "^#"
interface: 192.168.9.233
auto_accept: True
file_roots:
base:
- /srv/salt
pillar_roots:
base:
- /srv/pillar
pillar_opts: True
nodegroups:
web01group: 'web01.saltstack.com'
web02group: 'web02.saltstack.com'
[root@master ~]# systemctl start salt-master
[root@master ~]# systemctl enable salt-master
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-master.service to /usr/lib/systemd/system/salt-master.service.
创建 salt 文件根目录及 pillar 目录
[root@master ~]# mkdir /srv/salt
[root@master ~]# mkdir /srv/pillar
minion1:
yum install -y salt-minion
vim /etc/salt/minion
修改以下内容:
master: 192.168.9.233 #设置主控端 IP
id: web01.saltstack.com #设置被控主机名,另一台是 web02.saltstack.com
systemctl start salt-minion
systemctl enable salt-minion
minion2:
yum install -y salt-minion
vim /etc/salt/minion
修改以下内容:
master: 192.168.9.233 #设置主控端 IP
id: web02.saltstack.com #设置被控主机名,另一台是 web01.saltstack.com
systemctl start salt-minion
systemctl enable salt-minion
master:
[root@master ~]# salt '*' test.ping
web01.saltstack.com:
True
web02.saltstack.com:
True
SaltStack 批量部署 Nginx
通过下面的命令可以查看被控机 web01 主机的 grains 所有值
[root@master ~]# salt 'web01.saltstack.com' grains.items
创建 grains 目录,需要将目录下的定制文件同步到被控机上运行;之后才能正常获取被控机的打开文件句柄数
[root@master ~]# mkdir /srv/salt/_grains
[root@master ~]# vim /srv/salt/_grains/nginx_config.py
添加以下内容:
#!/usr/bin/python3
import os
import sys
import subprocess  # 'commands' 模块在 Python 3 中已被移除,改用 subprocess
def NginxGrains():
'''
return Nginx config grains value
'''
grains = {}
max_open_file=65535
try:
getulimit = commands.getstatusoutput('source /etc/profile;ulimit -n')
except Exception,e:
pass
if getulimit[0]==0:
max_open_file = int(getulimit[1])
grains['max_open_file'] = max_open_file
return grains
[root@master ~]# chmod +x /srv/salt/_grains/nginx_config.py
将自定义 grains 模块同步到被控端,然后确认在主控端是否能获取被控端的 max_open_file 值
[root@master ~]# salt '*' saltutil.sync_grains
[root@master ~]# salt '*' grains.item max_open_file
配置 pillar
[root@master ~]# vim /srv/pillar/top.sls
添加以下内容:
base:
web01group:
- match: nodegroup
- web01server
web02group:
- match: nodegroup
- web02server
“web01group”和”web02group”是/etc/salt/master 中定义的不同的组,对每一个组编写
一个对应的文件指定配置,这里使用的是“web01server”和”web02server”,再分别定义不同
组主机的 Nginx 的根目录,如下所示:
[root@master ~]# vim /srv/pillar/web01server.sls
nginx:
root: /data
[root@master ~]# vim /srv/pillar/web02server.sls
nginx:
root: /www
使用以下命令查看 pillar 配置的情况
[root@master ~]# salt '*' pillar.data nginx
如果不显示结果,重启一下 minion1 和 minion2 的服务
systemctl restart salt-minion
定义 state 的入口 top.sls 文件
[root@master ~]# vim /srv/salt/top.sls
base:
'*':
- nginx
定义被控机执行的状态,安装 Nginx 软件、配置、启动
[root@master ~]# vim /srv/salt/nginx.sls
添加以下内容:
nginx:
pkg:
- installed
file.managed:
- source: salt://nginx/nginx.conf
- name: /etc/nginx/nginx.conf
- user: root
- group: root
- mode: 644
- template: jinja
service.running:
- enable: True
- reload: True
- watch:
- file: /etc/nginx/nginx.conf
- pkg: nginx
使用 jinja 模板定义 Nginx 配置文件 nginx.conf,首先创建一个 nginx 目录,因为上面定
义了 Nginx 配置文件的源路径
[root@master ~]# mkdir /srv/salt/nginx
然后在该路径下创建 Nginx 配置文件,nginx.conf 配置文件可以根据自己的需求进行编
写
[root@master ~]# vim /srv/salt/nginx/nginx.conf
添加以下内容:
user nginx;
worker_processes {{grains['num_cpus']}};
{% if grains['num_cpus'] ==1 %}
worker_cpu_affinity 1;
{% elif grains['num_cpus'] ==2 %}
worker_cpu_affinity 01 10;
{% elif grains['num_cpus'] == 4 %}
worker_cpu_affinity 0001 0010 0100 1000;
{% elif grains['num_cpus'] == 8 %}
worker_cpu_affinity 00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000;
{% else %}
worker_cpu_affinity 0001 0010 0100 1000;
{% endif %}
worker_rlimit_nofile {{ grains['max_open_file'] }};
error_log /var/log/nginx_error.log;
pid /var/run/nginx.pid;
events{
worker_connections {{ grains['max_open_file'] }};
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 60;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"' ;
server{
listen 80 default_server;
server_name _;
location / {
root {{ pillar['nginx']['root'] }};
index index.html index.htm;
}
error_page 404 /404.html;
location = /404.html {
root /usr/share/nginx/html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
}
全部主机:
yum -y install epel-release.noarch
验证:
两台被控端执行安装 Nginx 并配置
[root@master ~]# salt '*' state.highstate
检查节点nginx的进程
[root@web01 ~]# ps -ef | grep nginx