Use with caution: the diagrams and theory here are borrowed from other sources; only the experiments are my own.
1. Keepalived overview:
Overview: Keepalived started out as a companion tool for LVS, providing failover for the LVS load balancer and health checking of the web (real server) nodes; it has since been used in many other scenarios that need fault tolerance. Keepalived itself is built on VRRP (Virtual Router Redundancy Protocol), an open IETF standard modeled on Cisco's proprietary HSRP.
2. Design principles:
Application scenarios:
Design modules:
1. core module: the core component of keepalived; it starts and maintains the main process and loads and parses the global configuration file.
2. check module: performs health checks on the nodes in the real server pool.
3. vrrp module: runs the heartbeat (VRRP advertisement) exchange between the master and the backup.
How hot standby works: several hosts are grouped, in software, into a hot-standby group that serves clients through a shared virtual IP (VIP) address. At any moment only one host in the group is active; the rest stay in standby. When the active host fails, one of the standby hosts automatically takes over the VIP and continues serving requests, which keeps the architecture available.
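If you want to watch this election on the wire, the master multicasts a VRRP advertisement (IP protocol 112) to 224.0.0.18 once per advert_int; a capture filter on that multicast address is enough to see it (ens33 is the interface used in the lab below):
# tcpdump -i ens33 -nn -c 3 host 224.0.0.18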
3. Case study: dual-node hot standby with Keepalived
Topology:
Environment:
OS | IP address | Hostname | Software |
CentOS 7.5 1804 64-bit | 10.0.3.49 | node1 | keepalived, httpd |
CentOS 7.5 1804 64-bit | 10.0.3.55 | node2 | keepalived, httpd |
VIP: 10.0.3.56
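Most of the node2 commands below are pushed from node1 over SSH, so node1 needs to resolve the name node2 and, ideally, log in without a password. A minimal way to set that up (assuming the addresses above) is:
[root@node1 ~]# cat >> /etc/hosts <<EOF
10.0.3.49 node1
10.0.3.55 node2
EOF
[root@node1 ~]# ssh-keygen -t rsa     ## accept the defaults
[root@node1 ~]# ssh-copy-id root@node2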
Step 1: Install httpd on node1 and node2
[root@node1 ~]# yum install -y httpd
[root@node1 ~]# echo "<h1>node1</h1>" > /var/www/html/index.html
[root@node1 ~]# systemctl start httpd && systemctl enable httpd
[root@node1 ~]# ssh node2 "yum install -y httpd"
[root@node1 ~]# ssh node2 "echo '<h1>node2</h1>' > /var/www/html/index.html"
[root@node1 ~]# ssh node2 "systemctl start httpd && systemctl enable httpd"
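Before moving on, confirm that each node serves its own page (a quick curl check against the real addresses, not the VIP):
[root@node1 ~]# curl http://10.0.3.49
<h1>node1</h1>
[root@node1 ~]# curl http://10.0.3.55
<h1>node2</h1>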
Step 2: Install keepalived
Install it straight from yum here; no source build needed.
[root@node1 ~]# yum install -y keepalived.x86_64
[root@node1 ~]# ssh node2 "yum install -y keepalived.x86_64"
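One thing to watch out for: if firewalld is running on either node, the VRRP advertisements (IP protocol 112) may be dropped and both nodes will end up claiming the VIP. Either stop firewalld for the lab or allow VRRP explicitly; a rich rule like the one below, run on both nodes (default zone assumed), is one way to do it:
[root@node1 ~]# firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'
[root@node1 ~]# firewall-cmd --reload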
Step 3: Configure the MASTER node on node1
[root@node1 ~]# vim /etc/keepalived/keepalived.conf
[root@node1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id TEST_R1                        ## name of this server; must not repeat when several keepalived servers share the environment
}
vrrp_instance VI_1 {                         ## VRRP hot-standby instance; use a separate instance per keepalived group
    state MASTER                             ## MASTER marks the primary server
    interface ens33                          ## physical interface that carries the VIP
    virtual_router_id 51                     ## virtual router ID; different for each group, but identical within one group
    priority 100                             ## priority; the higher the number, the higher the priority
    advert_int 1                             ## advertisement interval in seconds (heartbeat frequency)
    authentication {                         ## authentication settings
        auth_type PASS                       ## authentication type
        auth_pass VVP3VU                     ## password string
    }
    virtual_ipaddress {                      ## floating address (VIP)
        10.0.3.56
    }
}
virtual_server 10.0.3.56 80 {                ## virtual server bound to the VIP
    delay_loop 6                             ## check the real_server state every 6 seconds
    lb_algo wrr                              ## LVS scheduling algorithm
    lb_kind DR                               ## LVS forwarding mode
    persistence_timeout 50                   ## session persistence time
    protocol TCP                             ## protocol
    real_server 10.0.3.49 80 {               ## this node's own address
        weight 3                             ## server weight
        notify_down /etc/keepalived/check.sh ## script run when the node fails the check; notify_up runs when it recovers
        ## available health checks: HTTP_GET | SSL_GET | TCP_CHECK | SMTP_CHECK | MISC_CHECK
        TCP_CHECK {
            connect_timeout 10               ## connection timeout
            nb_get_retry 3                   ## number of retries
            delay_before_retry 3             ## delay between retries
            connect_port 80                  ## port used for the health check
        }
    }
}
[root@node1 ~]# vim /etc/keepalived/check.sh
#!/bin/bash
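# Called by keepalived's notify_down when the local httpd check fails:
# stop keepalived so the VIP fails over to the peer, and log the event.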
systemctl stop keepalived
echo -e "$(ip a |grep ens33 |grep inet |awk '{print $2}'|awk -F'/' '{print $1}') (httpd) is down on $(date +%F-%T)" >>/root/check_httpd.log
[root@node1 ~]# chmod 777 /etc/keepalived/check.sh
[root@node1 ~]# systemctl start keepalived
[root@node1 ~]# ip a | grep 10.0.3.56
inet 10.0.3.56/32 scope global ens33
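If the ipvsadm tool is installed, you can also see the virtual server and real server that keepalived registered with the kernel:
[root@node1 ~]# yum install -y ipvsadm
[root@node1 ~]# ipvsadm -Ln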
Step 4: Configure the BACKUP node on node2
[root@node2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id TEST_R2                        ## name of this server
}
vrrp_instance VI_1 {
    state BACKUP                             ## BACKUP marks the standby server
    interface ens33
    virtual_router_id 51                     ## must match the master's ID so both nodes join the same VRRP group
    priority 99                              ## priority; lower than the master's
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass VVP3VU
    }
    virtual_ipaddress {
        10.0.3.56
    }
}
virtual_server 10.0.3.56 80 {                ## virtual server bound to the VIP
    delay_loop 6                             ## check the real_server state every 6 seconds
    lb_algo wrr
    lb_kind DR
    persistence_timeout 50                   ## session persistence time
    protocol TCP
    real_server 10.0.3.55 80 {               ## this node's own address
        weight 3
        notify_down /etc/keepalived/check.sh
        TCP_CHECK {
            connect_timeout 10               ## connection timeout
            nb_get_retry 3                   ## number of retries
            delay_before_retry 3             ## delay between retries
            connect_port 80                  ## port used for the health check
        }
    }
}
[root@node2 ~]# vim /etc/keepalived/check.sh
[root@node2 ~]# cat /etc/keepalived/check.sh
#!/bin/bash
systemctl stop keepalived
echo -e "$(ip a |grep ens33 |grep inet |awk '{print $2}'|awk -F'/' '{print $1}') (httpd) is down on $(date +%F-%T)" >>/root/check_httpd.log
[root@node2 ~]# chmod 777 /etc/keepalived/check.sh
[root@node2 ~]# systemctl start keepalived.service
[root@node2 ~]# ip a | grep 10.0.3.56
[root@node2 ~]#
While node1 is up it holds the VIP, so this returns nothing on node2; the address only appears here after a failover (see step 6).
Step 5: Client access
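From any host on the 10.0.3.0/24 network, point a browser or curl at the VIP; with node1 acting as MASTER the request should be answered by node1:
# curl http://10.0.3.56
<h1>node1</h1>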
Step 6: node1 goes down
[root@node1 ~]# systemctl stop httpd
[root@node1 ~]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Jan 25 16:33:35 node1 Keepalived_healthcheckers[4626]: TCP connection to [10.0.3.49]:80 failed.
Jan 25 16:33:35 node1 Keepalived_healthcheckers[4626]: Check on service [10.0.3.49]:80 failed after 1 retry.
Jan 25 16:33:35 node1 Keepalived_healthcheckers[4626]: Removing service [10.0.3.49]:80 from VS [10.0.3.56]:80
Jan 25 16:33:35 node1 Keepalived_healthcheckers[4626]: Executing [/etc/keepalived/check.sh] for service [10.0.3.49]:80 in VS...6]:80
Jan 25 16:33:35 node1 Keepalived_healthcheckers[4626]: Lost quorum 1-0=1 > 0 for VS [10.0.3.56]:80
Jan 25 16:33:35 node1 Keepalived[4625]: Stopping
Jan 25 16:33:35 node1 systemd[1]: Stopping LVS and VRRP High Availability Monitor...
Jan 25 16:33:35 node1 Keepalived_vrrp[4627]: VRRP_Instance(VI_1) sent 0 priority
Jan 25 16:33:35 node1 Keepalived_vrrp[4627]: VRRP_Instance(VI_1) removing protocol VIPs.
Jan 25 16:33:36 node1 systemd[1]: Stopped LVS and VRRP High Availability Monitor.
Hint: Some lines were ellipsized, use -l to show in full.
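After the TCP_CHECK failure the notify_down script stopped keepalived on node1, so the VIP should now sit on node2 and client requests to the VIP should return node2's page; the log written by check.sh records the event:
[root@node2 ~]# ip a | grep 10.0.3.56
inet 10.0.3.56/32 scope global ens33
# curl http://10.0.3.56
<h1>node2</h1>
[root@node1 ~]# cat /root/check_httpd.log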
Step 7: Start httpd and keepalived on node1 again
[root@node1 ~]# systemctl start httpd
[root@node1 ~]# systemctl start keepalived
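Because node1's priority (100) is higher than node2's and VRRP preemption is enabled by default in keepalived, node1 should reclaim the VIP shortly after its keepalived starts; the same check as before confirms it:
[root@node1 ~]# ip a | grep 10.0.3.56
inet 10.0.3.56/32 scope global ens33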