saltstack+keepalived+lvs+nginx+apache
Architecture diagram:
- Environment:
Server1:172.25.254.1 salt-master keepalived-master haproxy ipvsadm
Server2:172.25.254.2 salt-minion keepalived-backup haproxy
Server3:172.25.254.3 salt-minion Apache
Server4:172.25.254.4 salt-minion Nginx
1. SaltStack installation and configuration:
libsodium-1.0.16-1.el7.x86_64.rpm PyYAML-3.11-1.el7.x86_64.rpm
openpgm-5.2.122-2.el7.x86_64.rpm
python2-libcloud-2.0.0-2.el7.noarch.rpm salt-2018.3.3-1.el7.noarch.rpm
python-cherrypy-5.6.0-2.el7.noarch.rpm salt-api-2018.3.3-1.el7.noarch.rpm
python-crypto-2.6.1-2.el7.x86_64.rpm salt-cloud-2018.3.3-1.el7.noarch.rpm
python-futures-3.0.3-1.el7.noarch.rpm salt-master-2018.3.3-1.el7.noarch.rpm
python-msgpack-0.4.6-1.el7.x86_64.rpm salt-minion-2018.3.3-1.el7.noarch.rpm
python-psutil-2.2.1-1.el7.x86_64.rpm salt-ssh-2018.3.3-1.el7.noarch.rpm
python-tornado-4.2.1-1.el7.x86_64.rpm salt-syndic-2018.3.3-1.el7.noarch.rpm
python-zmq-15.3.0-3.el7.x86_64.rpm zeromq-4.1.4-7.el7.x86_64.rpm
Salt master (server1):
yum install -y salt-master salt-minion
systemctl start salt-master
Salt minions (server2, server3, server4; server1's own minion is configured the same way so that it can be targeted from top.sls):
yum install -y salt-minion
vim /etc/salt/minion
master: 172.25.254.1
systemctl start salt-minion
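Before any state can be pushed, accept the minions' keys on the master and verify connectivity (a standard step not shown in the transcript above):
salt-key -L          # list accepted and pending minion keys
salt-key -A          # accept all pending keys
salt '*' test.ping   # every minion should answer True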
- Automated service deployment with SaltStack
[root@server1 ~]# mkdir /srv/salt
[root@server1 salt]# mkdir keepalived/files -p
[root@server1 salt]# mkdir nginx/files -p
[root@server1 salt]# mkdir httpd/files -p
[root@server1 salt]# mkdir ipvs -p
[root@server1 salt]# mkdir cluster/files -p
[root@server1 salt]# mkdir haproxy/files -p
[root@server1 salt]# mkdir pkg
[root@server1 salt]# mkdir /srv/pillar/web -p
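After these commands the file-root layout under /srv/salt should look roughly like this:
/srv/salt
├── cluster/files
├── haproxy/files
├── httpd/files
├── ipvs
├── keepalived/files
├── nginx/files
└── pkg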
Keepalived deployment:
[root@server1 keepalived]# vim install.sls
include:
  - pkg.init

keepalived-install:
  file.managed:
    - name: /root/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /root && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived    # skip the rebuild once keepalived is already installed

keepalived-init:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - user: root
    - group: root
    - mode: 755

keepalived-sysconfig:
  file.managed:
    - name: /etc/sysconfig/keepalived
    - source: salt://keepalived/files/keepalived.sysconfig
    - user: root
    - group: root
    - mode: 644

/etc/keepalived:
  file.directory:
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
Build and install keepalived from source on another host first, then copy the generated init script and sysconfig file back into the master's file roots (the sysconfig file is renamed keepalived.sysconfig to match the state above):
scp keepalived-2.0.6/keepalived/etc/init.d/keepalived root@172.25.254.1:/srv/salt/keepalived/files/
scp /usr/local/keepalived/etc/sysconfig/keepalived root@172.25.254.1:/srv/salt/keepalived/files/keepalived.sysconfig
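With the source tarball, init script, and sysconfig file in place under keepalived/files, the state can be tested in isolation before it is wired into top.sls, for example:
salt server1 state.sls keepalived.install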
ipvsadm deployment:
[root@server1 ipvs]# vim install.sls
ipvsadm-install:
  pkg.installed:
    - name: ipvsadm
pkg (common build dependencies):
[root@server1 salt]# cat pkg/init.sls
pkg-init:
  pkg.installed:
    - pkgs:
      - gcc
      - gcc-c++
      - glibc
      - make
      - autoconf
      - openssl
      - openssl-devel
      - pcre-devel
      - pcre
Cluster deployment:
[root@server1 cluster]# cat lvs-keepalived.sls
include:
  - keepalived.install

keepalived-server:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://cluster/files/keepalived.conf
    - mode: 644
    - user: root
    - group: root
    - template: jinja
    {% if grains['fqdn'] == 'server1' %}
    - ROUTERID: lvs_01
    - STATEID: MASTER
    - PRIORITYID: 150
    {% elif grains['fqdn'] == 'server2' %}
    - ROUTERID: lvs_02
    - STATEID: BACKUP
    - PRIORITYID: 100
    {% endif %}
  service.running:
    - name: keepalived
    - enable: True
    - watch:
      - file: keepalived-server
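The Jinja branches key off each minion's fqdn grain, so it is worth confirming the grain values before the push:
salt server1 grains.item fqdn
salt server2 grains.item fqdn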
Prepare keepalived.conf: copy the sample file generated by the source build to the master, then edit it into the Jinja template below:
scp /usr/local/keepalived/etc/keepalived/keepalived.conf root@172.25.254.1:/srv/salt/cluster/files/
[root@server1 files]# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id {{ ROUTERID }}
}

vrrp_instance VI_1 {
    state {{ STATEID }}
    interface eth0
    virtual_router_id 55
    priority {{ PRIORITYID }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.254.100/24
    }
}

virtual_server 172.25.254.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 300
    protocol TCP

    real_server 172.25.254.3 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
    real_server 172.25.254.4 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
}
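Note that in LVS DR mode the forwarded packets arrive at server3/server4 still addressed to the VIP, so each real server must hold the VIP on a local interface and must not answer ARP for it. That step is not shown in the transcript above; a minimal sketch, run on each real server:
ip addr add 172.25.254.100/32 dev lo        # accept DR-forwarded packets addressed to the VIP
sysctl -w net.ipv4.conf.all.arp_ignore=1    # do not answer ARP queries for the VIP
sysctl -w net.ipv4.conf.all.arp_announce=2  # never use the VIP as the ARP source address
sysctl -w net.ipv4.conf.lo.arp_ignore=1
sysctl -w net.ipv4.conf.lo.arp_announce=2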
HAProxy deployment:
[root@server1 haproxy]# cat install.sls
haproxy-create:
  group.present:
    - name: haproxy
    - gid: 900
  user.present:
    - name: haproxy
    - shell: /sbin/nologin
    - uid: 900
    - gid: 900
    - createhome: false

haproxy-install:
  file.managed:
    - name: /root/haproxy-1.4.24-1.x86_64.rpm
    - source: salt://haproxy/files/haproxy-1.4.24-1.x86_64.rpm
  cmd.run:
    - name: yum install -y /root/haproxy-1.4.24-1.x86_64.rpm
    - unless: rpm -q haproxy

/usr/share/haproxy:
  file.directory:
    - mode: 755
[root@server1 haproxy]# cat service.sls
include:
  - haproxy.install
  - pkg.init

haproxy-service:
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://haproxy/files/haproxy.cfg
  service.running:
    - name: haproxy
    - enable: true
    - reload: true
    - watch:
      - file: haproxy-service
[root@server1 files]# cat haproxy.cfg
#this config needs haproxy-1.1.28 or haproxy-1.2.1
global
        log 127.0.0.1   local0
        log 127.0.0.1   local1 notice
        #log loghost    local0 info
        maxconn 4096
        chroot /usr/share/haproxy
        uid 900
        gid 900
        daemon
        #debug
        #quiet

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        retries 3
        redispatch
        maxconn 2000
        contimeout      5000
        clitimeout      50000
        srvtimeout      50000

listen www.westos.org *:80
        balance roundrobin
        server app1 172.25.254.3:80 check inter 2000
        server app2 172.25.254.4:80 check inter 2000
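The file can be syntax-checked on any node that already has the package installed, before Salt reloads the service:
haproxy -c -f /etc/haproxy/haproxy.cfg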
Apache deployment:
[root@server1 httpd]# cat install.sls
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /var/www/html/index.html
    - source: salt://httpd/files/index.html
    - mode: 644
[root@server1 httpd]# cat service.sls
include:
  - httpd.install

apache-service:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/files/httpd.conf
  service.running:
    - name: httpd
    - enable: true
    - reload: true
    - watch:
      - file: apache-service
Test page:
[root@server1 files]# cat index.html
<h1>server3->>>Apache</h1>
Nginx deployment:
[root@server1 nginx]# cat install.sls
include:
  - pkg.init

nginx-install:
  file.managed:
    - name: /root/nginx-1.8.0.tar.gz
    - source: salt://nginx/files/nginx-1.8.0.tar.gz
    - mode: 755
    - user: root
    - group: root
  user.present:
    - name: nginx
    - createhome: False
    - gid_from_name: True
    - shell: /sbin/nologin
  cmd.run:
    - name: cd /root && tar zxf nginx-1.8.0.tar.gz && cd nginx-1.8.0 && sed -i.bak 's/#define NGINX_VER "nginx\/" NGINX_VERSION/#define NGINX_VER "nginx"/g' src/core/nginx.h && sed -i.bak 's/CFLAGS="$CFLAGS -g"/#CFLAGS="$CFLAGS -g"/g' auto/cc/gcc && ./configure --prefix=/usr/local/nginx/ --user=nginx --group=nginx --with-http_stub_status_module --with-http_ssl_module && make && make install
    - creates: /usr/local/nginx

nginx-web:
  file.managed:
    - name: /usr/local/nginx/html/index.html
    - source: salt://nginx/files/index.html
    - mode: 644
[root@server1 nginx]# cat service.sls
include:
  - nginx.install

nginx-service:
  file.managed:
    - name: /usr/local/nginx/conf/nginx.conf
    - source: salt://nginx/files/nginx.conf
  cmd.run:
    - name: /usr/local/nginx/sbin/nginx
    - creates: /usr/local/nginx/logs/nginx.pid

/usr/local/nginx/sbin/nginx -s reload:
  cmd.run:
    - onchanges:
      - file: nginx-service
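The last state reloads nginx only when nginx.conf actually changes. The whole chain can be dry-run first, for example:
salt server4 state.sls nginx.service test=True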
[root@server1 files]# cat index.html
<h1>server4->>>nginx</h1>
[root@server1 salt]# cat top.sls
base:
  'server1':
    - keepalived.install
    - ipvs.install
    - cluster.lvs-keepalived
    - haproxy.service
  'server2':
    - cluster.lvs-keepalived
    - keepalived.install
    - haproxy.service
  'server3':
    - httpd.service
  'server4':
    - nginx.service
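A dry run shows what highstate would change on every minion without applying anything:
salt '*' state.highstate test=True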
[root@server1 srv]# cd pillar/
[root@server1 pillar]# pwd
/srv/pillar
[root@server1 pillar]# ls
top.sls  web
[root@server1 pillar]# cat top.sls
base:
  '*':
    - web.install
[root@server1 pillar]# cd web/
[root@server1 web]# cat install.sls
{% if grains['fqdn'] == 'server1' %}
web: httpd
{% elif grains['fqdn'] == 'server2' %}
web: nginx
{% endif %}
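After editing pillar data, refresh and inspect it on the minions with the standard commands:
salt '*' saltutil.refresh_pillar
salt '*' pillar.items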
Testing:
Push the states from the salt-master:
[root@server1 salt]# salt '*' state.highstate
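On the keepalived master, the generated LVS rule set can then be checked; the output should list 172.25.254.100:80 with server3 and server4 as real servers:
salt server1 cmd.run 'ipvsadm -ln'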
From the physical host, access the configured VIP as well as the domain name; note that a local resolution entry must be added first.
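For example, a hypothetical /etc/hosts entry on the physical host, pointing the domain at the HAProxy listener on server1 while the VIP is accessed by address:
172.25.254.1    www.westos.org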
At this point the VIP is on server1.
Now stop keepalived on server1:
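For example, using the init script installed by the keepalived state:
/etc/init.d/keepalived stop
ip addr show eth0        # the VIP 172.25.254.100 should disappear from server1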
The VIP has failed over to server2.
Access the domain name from the physical host again:
[kiosk@foundation55 haproxy]$ curl www.westos.org
<h1>server4->>>nginx</h1>
[kiosk@foundation55 haproxy]$ curl www.westos.org
<h1>server3->>>Apache</h1>
[kiosk@foundation55 haproxy]$ curl www.westos.org
<h1>server4->>>nginx</h1>
One remaining issue: after stopping keepalived on server1, repeated requests to the VIP keep returning the same page, while requests to the domain name alternate normally. This matches the configuration above: the LVS virtual_server carries persistence_timeout 300, so connections to the VIP stay pinned to one real server for 300 seconds, whereas the domain name is answered by HAProxy's roundrobin balancing. Lowering or removing persistence_timeout makes the VIP alternate as well.