haproxy 提供高可用性、负载均衡以及基于 TCP 和 HTTP 应用的代理,支持虚拟主机,它是免费、快速并且可靠的一种解决方案。HAProxy 特别适用于那些负载特大的 web 站点,这些站点通常又需要会话保持或七层处理。HAProxy 运行在当前的硬件上,完全可以支持数以万计的并发连接。并且它的运行模式使得它可以很简单、安全地整合进您当前的架构中,同时可以保护你的 web 服务器不被暴露到网络上。
一、haproxy实现负载均衡
主机
get haproxy-1.6.11.tar.gz
传给server1、server4
server1:
安装服务
yum install -y rpm-build
yum install -y pcre-devel
yum install -y gcc
1、haproxy配置过程
解压
[root@server1 ~]# rpmbuild -tb haproxy-1.6.11.tar.gz
[root@server1 ~]# ls
haproxy-1.6.11.tar.gz
[root@server1 ~]# cd rpmbuild/RPMS/x86_64
[root@server1 x86_64]# ls
haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# rpm -qpl haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
Preparing... ########################################### [100%]
1:haproxy ########################################### [100%]
[root@server1 ~]# tar zxf haproxy-1.6.11.tar.gz
[root@server1 ~]# cd haproxy-1.6.11
[root@server1 haproxy-1.6.11]# find -name "*.spec"
./examples/haproxy.spec
[root@server1 haproxy-1.6.11]# cd examples/
[root@server1 examples]# cp content-sw-sample.cfg /etc/haproxy/haproxy.cfg
[root@server1 examples]# cd /etc/haproxy/
[root@server1 haproxy]# ls
haproxy.cfg
在 haproxy.spec 文件中可以查到默认配置文件名为 haproxy.cfg,因此才将示例 cfg 文件复制并命名为 haproxy.cfg
[root@server1 haproxy]# vim /etc/init.d/haproxy
7 # config: /etc/haproxy/haproxy.cfg
建立用户
[root@server1 haproxy]# grep 200 /etc/passwd   #确认 uid/gid 200 尚未被其他用户占用
[root@server1 haproxy]# groupadd -g 200 haproxy
[root@server1 haproxy]# useradd -u 200 -g 200 -M haproxy
[root@server1 haproxy]# id haproxy
uid=200(haproxy) gid=200(haproxy) groups=200(haproxy)
[root@server1 haproxy]# su - haproxy ##无法登录
su: warning: cannot change directory to /home/haproxy: No such file or directory
-bash-4.1$ logout
[root@server1 haproxy]# vim /etc/security/limits.conf
在文件最后写
50 # End of file
51
52 haproxy - nofile 10000
目录存在
[root@server1 haproxy]# ll -d /var/empty/
drwxr-xr-x. 3 root root 4096 Jul 28 11:12 /var/empty/
2、编辑haproxy配置文件
[root@server1 haproxy]# vim haproxy.cfg
1 #
2 # This is a sample configuration. It illustrates how to separate static obje cts
3 # traffic from dynamic traffic, and how to dynamically regulate the server l oad.
4 #
5 # It listens on 192.168.1.10:80, and directs all requests for Host 'img' or
6 # URIs starting with /img or /css to a dedicated group of servers. URIs
7 # starting with /admin/stats deliver the stats page.
8 #
9
10 global #全局条件
11 maxconn 10000 #最大连接数
12 stats socket /var/run/haproxy.stat mode 600 level admin
13 log 127.0.0.1 local0 #全局的日志配置
14 uid 200 #用户uid
15 gid 200 #用户gid
16 chroot /var/empty
17 daemon
18
19 defaults #默认条件
20 mode http #7层http协议
21 log global
22 option httplog #http日志
23 option dontlognull #空链接
24 monitor-uri /monitoruri
25 maxconn 8000 #最大链接数
26 timeout client 30s #客户端连接超时
27
28 stats uri /admin/stats #监控页面
29
30 option prefer-last-server
31 retries 2 #连接后端失败时最多重试2次
32 option redispatch
33 timeout connect 5s
34 timeout server 5s
35
36 # The public 'www' address in the DMZ
37 frontend public
38 bind *:80 name clear #监听本机所有接口
39 #bind 192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
40 #use_backend static if { hdr_beg(host) -i img }
41 #use_backend static if { path_beg /img /css }
42 default_backend static # 默认使用static后端服务器集群
43
44 # The static backend backend for 'Host: img', /img and /css.
45 backend static
46 balance roundrobin #负载均衡算法rr
47 server statsrv1 172.25.10.2:80 check inter 1000 #后端服务器1
48 server statsrv2 172.25.10.3:80 check inter 1000 #后端服务器2
[root@server1 haproxy]# /etc/init.d/haproxy start
server2、server3:
打开httpd服务
/etc/init.d/httpd start
配置默认发布文件
[root@server2 html]#cat /var/www/html/index.html
server2
[root@server3 html]#cat /var/www/html/index.html
server3
检测:
浏览器输入172.25.10.1 可以看见以轮询的方式访问出现server2、server3
浏览器输入http://172.25.10.1/monitoruri
当server2、server3的httpd关闭时,页面不变
浏览器输入http://172.25.10.1/admin/stats 查看监控界面
二、打开日志接收端口
修改配置文件
vim /etc/rsyslog.conf
13 $ModLoad imudp        #取消注释,加载 UDP 日志接收模块
14 $UDPServerRun 514     #取消注释,监听 514/UDP 端口接收日志
42 *.info;mail.none;authpriv.none;cron.none;local0.none /var/log/messages
62 local0.* /var/log/haproxy.log
[root@server1 log]# /etc/init.d/rsyslog restart
查看日志
[root@server1 haproxy]# cd /var/log/
[root@server1 log]# cat haproxy.log #一开始查看没有日志
浏览器172.25.10.1界面,把server2的httpd关闭后再打开,再用命令查看便有日志
三、哈希算法
编辑配置文件
vim /etc/haproxy/haproxy.cfg
44 # The static backend backend for 'Host: img', /img and /css.
45 backend static
46 #balance roundrobin
47 balance source
48 server statsrv1 172.25.61.2:80 check inter 1000
49 server statsrv2 172.25.61.3:80 check inter 1000
/etc/init.d/haproxy reload
检测:
浏览器访问172.25.61.1,虽然server2和server3的httpd都开着,但是它只显示一台主机的默认发布文件
四、动静分离
server1:
编辑配置文件
[root@server1 haproxy]# vim haproxy.cfg
42 use_backend static2 if { path_end -i .php }
43 default_backend static1
44
45 # The static backend backend for 'Host: img', /img and /css.
46 backend static1
47 balance roundrobin
48 #balance source
49 server statsrv1 172.25.10.2:80 check inter 1000
50
51 backend static2
52 balance roundrobin
53 server statsrv2 172.25.10.3:80 check inter 1000
/etc/init.d/haproxy reload
server3:
yum install php -y
/etc/init.d/httpd restart
cd /var/www/html/
[root@server3 html]# vim index.php
<?php
phpinfo();
?>
检测:
浏览器访问172.25.61.1/index.php
浏览器访问 172.25.61.1/index.html
动态
静态
五、haproxy 访问控制
1、添加访问控制
[root@server1 html]# cd /etc/haproxy/
[root@server1 haproxy]# vim haproxy.cfg
acl blacklist src 172.25.10.250
http-request deny if blacklist
use_backend static2 if { path_end -i .php }
default_backend static1
[root@server1 haproxy]# /etc/init.d/haproxy reload
检测:
此时访问172.25.61.1出现403错误
2、如果遇见403的错误代码就重定向
修改配置文件
[root@server1 haproxy]# vim haproxy.cfg
acl blacklist src 172.25.10.250
#http-request deny if blacklist
errorloc 403 http://172.25.10.1:8080 #errorloc 控制返回的错误,定向到本机
use_backend static2 if { path_end -i .php }
default_backend static1
安装http服务、修改端口
[root@server1 haproxy]# yum install -y httpd
端口改为8080
[root@server1 haproxy]# vim /etc/httpd/conf/httpd.conf
136 Listen 8080
定义默认发布内容
[root@server1 haproxy]# cd /var/www/html/
[root@server1 html]# vim index.html
网站正在维护中..
[root@server1 html]# /etc/init.d/httpd start
测试
本机访问172.25.61.1 访问页面出现 网站正在维护中...
3、直接重定向
编辑配置文件
[root@server1 haproxy]# vim haproxy.cfg
acl blacklist src 172.25.10.250
#http-request deny if blacklist
#errorloc 403 http://172.25.10.1:8080
redirect location http://172.25.10.3
use_backend static2 if { path_end -i .php }
default_backend static1
[root@server1 haproxy]# /etc/init.d/haproxy reload
测试
访问172.25.61.1时会直接访问到172.25.61.3
五、读写分离
读在server2 ,写在server3
1、图片共享
真机:
get图片传至server2
server1:
vim /etc/haproxy/haproxy.cfg
44 acl blacklist src 172.25.10.250
45
46 acl write method POST
47 acl write method PUT
48
49 #http-request deny if blacklist
50
51 #errorloc 403 http://172.25.10.1:8080
52
53 #redirect location http://www.baidu.com
54
55 use_backend static2 if { path_end -i .php }
56 use_backend static2 if write
57 default_backend static1
/etc/init.d/haproxy reload
server2:
[root@server2 ~]# cd /var/www/html/
[root@server2 html]# mkdir images
[root@server2 html]# cd images/
[root@server2 images]# ls
timg.jpg
测试
浏览器访问172.25.61.1/images/timg.jpg
有图片显示
2、目录共享
真机
get 目录
mirror upload
scp -r /home/kiosk/Desktop/upload 172.25.61.2:/var/www/html
scp -r /home/kiosk/Desktop/upload 172.25.61.3:/var/www/html
server2、server3:
[root@server3 html]# cd upload/
[root@server3 upload]# ls
index.php upload_file.php
[root@server3 upload]# mv * ..
mv: overwrite `../index.php'? y
[root@server3 upload]# cd ..
vim upload_file.php
5 && ($_FILES["file"]["size"] < 2000000)) ##文件大小改为2M
[root@server3 html]# chmod 777 upload
[root@server3 html]# ll
total 16
-rw-r--r-- 1 root root 8 Aug 4 10:57 index.html
-rw-r--r-- 1 root root 257 Aug 4 13:00 index.php
drwxrwxrwx 2 root root 4096 Aug 4 13:00 upload
-rw-r--r-- 1 root root 927 Aug 4 13:00 upload_file.php
server2:
yum install -y php
/etc/init.d/httpd restart
测试
浏览器访问172.25.61.1
点击Browse 添加图片
点击Submit 保存
上传成功后
六、Haproxy和pacemaker结合corosync实现负载均衡高可用
server4、server1:
yum install pacemaker corosync -y
注:需配置好yum源
[HighAvailability]
[LoadBalancer]
[ResilientStorage]
[ScalableFileSystem]
server4:
[root@server4 ~]# cd /etc/corosync/
[root@server4 corosync]# cp corosync.conf.example corosync.conf
[root@server4 corosync]# vim corosync.conf
10 bindnetaddr: 172.25.10.0
11 mcastaddr: 226.94.1.10
12 mcastport: 5405
在最后写
35 service {
36 name: pacemaker
37 ver: 0
38 }
[root@server4 corosync]# scp corosync.conf server1:/etc/corosync/
[root@server4 ~]# /etc/init.d/corosync start
cat /var/log/messages ##查看日志是否有报错
server1:
[root@server1 haproxy]# /etc/init.d/corosync start
cat /var/log/messages ##查看日志是否有报错
真机
get pssh-2.3.1-2.1.x86_64.rpm crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
传到server1、server4
server4、server1:
yum install -y pssh-2.3.1-2.1.x86_64.rpm crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
server4:
监控
crm_mon
server1:
crm_verify -VL 发现有报错
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# property stonith-enabled=false
crm(live)configure# commit
crm_verify -VL #不报错了
server4:
监控
crm_mon
server1:
添加资源,vip
crm
configure
primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.10.100 cidr_netmask=24 op monitor interval=1min
commit
ip addr查看增加的IP
以下的操作在server4的监控下进行
/etc/init.d/corosync stop
/etc/init.d/corosync start
服务关闭时
服务开启时
server4:
监控
crm_mon
server1:
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.10.100" cidr_netmask="24" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false"
crm(live)configure# property no-quorum-policy=ignore #忽略集群对法定节点数(quorum)的检查:集群默认至少需要两台主机,只剩一台主机时默认不能接管资源,设为 ignore 后单节点也可接管
crm(live)configure# commit
/etc/init.d/corosync stop
/etc/init.d/corosync start
服务关闭时
server1:
server1:
[root@server1 ~]# cd rpmbuild/RPMS/x86_64/
[root@server1 x86_64]# scp haproxy-1.6.11-1.x86_64.rpm server4:
scp /etc/haproxy/haproxy.cfg server4:/etc/haproxy/
添加资源haproxy
server4:
rpm -ivh haproxy-1.6.11-1.x86_64.rpm
/etc/init.d/haproxy start
监控
crm_mon
server1:
[root@server1 haproxy]# crm
crm(live)# configure
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=1min
crm(live)configure# commit
crm(live)configure# group hagroup vip haproxy #建立资源组
crm(live)configure# commit
对集群节点的管理
节点一崩溃时,节点4顶替
crm node standby
恢复节点1,不会顶替节点4
crm node online
七、添加资源fence
server1、server4:
yum install -y fence-virt.x86_64 fence-agents.x86_64
查看主机支持fence的代理类型。本次使用fence_xvm
[root@server4 ~]# stonith_admin -I
fence_xvm
server1:
stonith_admin -M -a fence_xvm
真机:
systemctl start fence_virtd
server1、server4:
查看钥匙
cd /etc/cluster
ll fence_xvm.key
真机:
查看虚拟机名字
virsh list
server4:
监控
crm_mon
server1:
[root@server1 cluster]# crm
crm(live)# configure
添加fence资源,并做好集群节点名和真实server的名字映射
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:WXJ1;server4:WXJ4" op monitor interval=1min
crm(live)configure# commit
开启fence功能
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# show
node server1 \
attributes standby="off"
node server4 \
attributes standby="off"
primitive haproxy lsb:haproxy \
op monitor interval="1min"
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.61.100" cidr_netmask="24" op monitor interval="1min"
primitive vmfence stonith:fence_xvm \
params pcmk_host_map="server1:WXJ1;server4:WXJ4" \
op monitor interval="1min"
group hagroup vip haproxy
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="true" \
no-quorum-policy="ignore"
测试fence功能:
首先设置:corosync 在两台主机上开机自启动:chkconfig corosync on
用命令down掉另一台主机,它会自动重启,等主机开启后会自动加到集群中
[root@server1 ~]# fence_xvm -H WXJ4   #在 server1 上 fence 掉另一台主机 server4,它会被强制重启,开机后自动重新加入集群
fence_xvm -H WXJ1                     #同理,在 server4 上可 fence 掉 server1