Keepalived + Nginx Lab

Environment Preparation

node1 (Nginx1): 192.168.10.10
node2 (Nginx2): 192.168.10.20
node3 (WEB1): 192.168.10.30
node4 (WEB2): 192.168.10.40
VIP: 192.168.10.100

Web Deployment

Run the following script on node3 and node4:
#!/bin/bash
yum install net-tools httpd -y
systemctl stop firewalld
setenforce 0
echo "<h1>This is RS1</h1>" > /var/www/html/index.html # use a different page on node4 (e.g. RS2) so the backends can be told apart
systemctl start httpd
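A quick check that both backends serve their pages (run from any host on the subnet; a minimal sketch, assuming httpd's default port 80):
curl http://192.168.10.30
curl http://192.168.10.40
Each request should return that node's own test page (RS1 / RS2).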

Nginx Deployment

Run the following script on node1 and node2:
#!/bin/bash
systemctl stop firewalld
setenforce 0
yum install nginx -y
cat > /etc/nginx/conf.d/proxy.conf << EOF
upstream websers {
    server 192.168.10.30;
    server 192.168.10.40;
}
server {
    listen 8080;
    server_name 192.168.10.10; # use 192.168.10.20 on node2
    location / {
        proxy_pass http://websers;
    }
}
EOF
systemctl restart nginx # nginx is not yet running after a fresh install, so "nginx -s reload" would fail here
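To verify the proxying, hit the proxy port a few times (a minimal check against the config above):
curl http://192.168.10.10:8080
curl http://192.168.10.10:8080
With the default round-robin upstream, the responses should alternate between the RS1 and RS2 test pages.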

Keepalived Deployment

Run the following script on node1 and node2:
#!/bin/bash
yum install keepalived -y
mv /etc/keepalived/keepalived.conf{,.bak}
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id node1 # change on node2
}
vrrp_instance VI_1 {
    state MASTER # BACKUP on node2
    interface ens33
    virtual_router_id 10
    priority 100 # use a value below 100 on node2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.100
    }
}
EOF
systemctl start keepalived
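Once keepalived is running, the VIP should be visible on the MASTER's interface. A quick sanity check (assuming the interface name ens33 from the config above):
ip addr show ens33 | grep 192.168.10.100
Only node1 should list the VIP; after a failover it should appear on node2 instead.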

A custom script is needed to check whether the nginx service itself is healthy (keepalived alone cannot tell whether nginx has died).

Extension: when the nginx service stops, the failed load balancer is not replaced on its own, so we add a check script.

Add the following to the main configuration file:

[root@node1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id node1
}
# define the check script
vrrp_script chk_http_port {
    script "/usr/local/src/check_nginx_pid.sh"
    interval 1
    weight -2 # lower the priority by 2 when the check fails
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 10
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # invoke the check script
    track_script {
        chk_http_port
    }
    virtual_ipaddress {
        192.168.10.100
    }
}
[root@node1 ~]# cat /usr/local/src/check_nginx_pid.sh
#!/bin/bash
# exits 0 while nginx is running and 1 when it is not;
# keepalived applies the weight change (and thus triggers failover) on a non-zero exit code
nginx_process_number=$(ps -C nginx --no-header | wc -l)
if [ $nginx_process_number -eq 0 ];then
    # systemctl restart nginx # optionally try restarting nginx before failing the check
    nginx_process_number=$(ps -C nginx --no-header | wc -l)
    if [ $nginx_process_number -eq 0 ];then
        exit 1
    else
        exit 0
    fi
else
    exit 0
fi
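A simple failover test, assuming node1 currently holds the VIP and node2's priority is above 98 (e.g. 99), so that the -2 weight is enough to demote node1:
[root@node1 ~]# systemctl stop nginx
[root@node2 ~]# ip addr show ens33 | grep 192.168.10.100
curl http://192.168.10.100:8080
Within a second or two the check script fails on node1, node2 takes over the VIP, and requests against the VIP keep being served.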

Keepalived + HAProxy + MySQL Dual-Master Lab

Environment Preparation

node1 (HAProxy1): 192.168.10.10
node2 (HAProxy2): 192.168.10.20
node3 (MySQL1): 192.168.10.30
node4 (MySQL2): 192.168.10.40
VIP: 192.168.10.99

MySQL Deployment

Run the following script on node3 and node4:
#!/bin/bash
Ip_addr="192.168.10.40" # set to the peer node's address (on node4 use 192.168.10.30)
User_pwd="000000"
systemctl stop firewalld
setenforce 0
yum install mariadb-server -y
sed -i '/^\[mysqld\]$/a\binlog-ignore-db = information_schema' /etc/my.cnf.d/server.cnf
sed -i '/^\[mysqld\]$/a\binlog-ignore-db = mysql' /etc/my.cnf.d/server.cnf
sed -i '/^\[mysqld\]$/a\skip-name-resolve' /etc/my.cnf.d/server.cnf
sed -i '/^\[mysqld\]$/a\auto-increment-increment = 2' /etc/my.cnf.d/server.cnf # number of masters; the same on both nodes
sed -i '/^\[mysqld\]$/a\log-bin = mysql-bin' /etc/my.cnf.d/server.cnf
sed -i '/^\[mysqld\]$/a\auto_increment_offset = 1' /etc/my.cnf.d/server.cnf # must differ on node4 (use 2)
sed -i '/^\[mysqld\]$/a\server-id = 1' /etc/my.cnf.d/server.cnf # must differ on node4 (use 2)
systemctl restart mariadb
mysql -uroot -e "grant replication slave on *.* to 'repuser'@'$Ip_addr' identified by '$User_pwd';"
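Before wiring up replication it is worth confirming the settings took effect (a quick check; on node4 the offset and server_id should come back as 2):
mysql -uroot -e "show variables like 'auto_increment%'; show variables like 'server_id';"
On node3 this should report auto_increment_increment = 2, auto_increment_offset = 1 and server_id = 1.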

Check the master status on node3:
MariaDB [(none)]> show master status;
+------------------+----------+--------------+--------------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB         |
+------------------+----------+--------------+--------------------------+
| mysql-bin.000003 |      402 |              | mysql,information_schema |
+------------------+----------+--------------+--------------------------+
Check the master status on node4:
MariaDB [(none)]> show master status;
+------------------+----------+--------------+--------------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB         |
+------------------+----------+--------------+--------------------------+
| mysql-bin.000003 |      407 |              | mysql,information_schema |
+------------------+----------+--------------+--------------------------+
Run the CHANGE MASTER command on node3 (pointing at node4's log file and position):
MariaDB [(none)]> change master to master_host='192.168.10.40',master_port=3306,master_user='repuser',master_password='000000',master_log_file='mysql-bin.000003',master_log_pos=407;
MariaDB [mysql]> start slave;
Run the CHANGE MASTER command on node4 (pointing at node3's log file and position):
MariaDB [(none)]> change master to master_host='192.168.10.30',master_port=3306,master_user='repuser',master_password='000000',master_log_file='mysql-bin.000003',master_log_pos=402;
MariaDB [mysql]> start slave;
Check the slave status on both nodes with show slave status\G and confirm that the IO and SQL threads are both running:
MariaDB [(none)]> show slave status\G
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
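A quick end-to-end test of the dual-master setup (the database names here are arbitrary): create a database on each node and confirm it appears on the peer.
[root@node3 ~]# mysql -uroot -e "create database repltest1;"
[root@node4 ~]# mysql -uroot -e "show databases like 'repltest1';"
[root@node4 ~]# mysql -uroot -e "create database repltest2;"
[root@node3 ~]# mysql -uroot -e "show databases like 'repltest2';"
Each database should show up on the other node almost immediately if both replication directions are healthy.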

HAProxy Deployment

Run the following script on node1 and node2:
#!/bin/bash
yum install haproxy -y
mv /etc/haproxy/haproxy.cfg{,.bak}
cat > /etc/haproxy/haproxy.cfg << EOF
global
    log          127.0.0.1 local2
    chroot       /var/lib/haproxy
    pidfile      /var/run/haproxy.pid
    maxconn      4000
    user         haproxy
    group        haproxy
    daemon
    stats socket /var/lib/haproxy/stats

listen mysql_proxy
    bind 0.0.0.0:3306
    mode tcp
    balance source
    server mysqldb1 192.168.10.30:3306 weight 1 check
    server mysqldb2 192.168.10.40:3306 weight 2 check

listen stats
    mode http
    bind 0.0.0.0:8080
    stats enable
    stats uri /dbs
    stats realm haproxy\ statistics
    stats auth admin:admin
EOF
systemctl start haproxy
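To verify that HAProxy is really proxying MySQL, one option is a throwaway account allowed to connect from the proxy's address (the probe user below is hypothetical and only for this test; repuser is granted only for the peer node's address). This assumes a mysql client is available on the proxy node:
[root@node3 ~]# mysql -uroot -e "grant select on *.* to 'probe'@'192.168.10.%' identified by '000000';" # run on node4 too in case the grant is not replicated
[root@node1 ~]# mysql -h 192.168.10.10 -P 3306 -uprobe -p000000 -e "select @@hostname;"
With balance source, all connections from the same client IP land on the same backend. The statistics page is reachable at http://192.168.10.10:8080/dbs (admin/admin).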

Keepalived Deployment

Run the following script on node1:
#!/bin/bash
yum install keepalived -y
mv /etc/keepalived/keepalived.conf{,.bak}
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id node1
}
vrrp_script chk_http_port {
    script "/usr/local/src/check_proxy_pid.sh"
    interval 1
    weight -2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 10
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        chk_http_port
    }
    virtual_ipaddress {
        192.168.10.99 # the VIP from the environment preparation
    }
}
EOF
systemctl start keepalived
Run the following script on node2:
#!/bin/bash
yum install keepalived -y
mv /etc/keepalived/keepalived.conf{,.bak}
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id node2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 10
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.99
    }
}
EOF
systemctl start keepalived
[root@node1 src]# cat check_proxy_pid.sh
#!/bin/bash
# exit 1 (triggering the priority drop) when no haproxy process is running
A=$(ps -C haproxy --no-header | wc -l)
if [ $A -eq 0 ];then
    exit 1
else
    exit 0
fi
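Failover can be tested the same way as in the nginx lab, assuming node1 currently holds the VIP:
[root@node1 ~]# systemctl stop haproxy
[root@node2 ~]# ip addr show ens33 | grep 192.168.10.99
The VIP should move to node2, and client connections to 192.168.10.99:3306 should continue to be proxied there.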

Keepalived + LVS Lab

Environment Preparation

node1 (DS1): 192.168.10.10
node2 (DS2): 192.168.10.20
node3 (RS1): 192.168.10.30
node4 (RS2): 192.168.10.40
VIP: 192.168.10.99 (single-master model)

RS Deployment

Run the following script on node3 and node4:
#!/bin/bash
yum install net-tools httpd -y
systemctl stop firewalld
setenforce 0
vip="192.168.10.99"
mask="255.255.255.255"
# bind the VIP to the loopback and suppress ARP for it, as LVS-DR requires
ifconfig lo:0 $vip broadcast $vip netmask $mask up
route add -host $vip lo:0
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "<h1>This is RS1</h1>" > /var/www/html/index.html # use a different page on node4 (e.g. RS2) for testing
systemctl start httpd
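A quick sanity check on each RS: the VIP must sit on the loopback and must not be advertised via ARP:
ip addr show lo | grep 192.168.10.99
sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
Expected output: the VIP on lo, arp_ignore = 1 and arp_announce = 2.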

DR Deployment

[root@node1 ~]# yum install keepalived ipvsadm -y
[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# setenforce 0
[root@node1 keepalived]# mv keepalived.conf{,.bak}
[root@node1 keepalived]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id node1 # LVS router id; should be unique within the network
}
vrrp_instance VI_1 {
    state MASTER # role of this keepalived node
    interface ens33 # network interface
    virtual_router_id 10 # virtual router ID; must match on master and backup
    priority 100 # higher wins; set a lower value on the backup router!
    advert_int 1 # advertisement interval, 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.99 # virtual IP address; more than one may be listed
    }
}
# define the virtual server: the externally visible IP and port
virtual_server 192.168.10.99 80 {
    delay_loop 6 # health-check interval in seconds
    lb_algo wrr # scheduling algorithm
    lb_kind DR # LVS forwarding mode
    persistence_timeout 0
    protocol TCP
    # real server IPs and ports
    real_server 192.168.10.30 80 {
        weight 2
        # TCP health check
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.10.40 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
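After starting keepalived on node1 (node2 mirrors this configuration with state BACKUP and a lower priority, as the comments above note), the LVS table and the scheduling can be checked:
[root@node1 ~]# systemctl start keepalived
[root@node1 ~]# ipvsadm -Ln
curl http://192.168.10.99
ipvsadm -Ln should list 192.168.10.99:80 with both real servers, and repeated curls against the VIP should return the RS1 and RS2 pages in roughly a 2:1 ratio (the wrr weights).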