1. Environment Preparation

IP           Hostname            Service
10.0.0.100   ka1.stars.org       keepalived + nginx
10.0.0.101   ka2.stars.org       keepalived + nginx
10.0.0.102   web1.stars.org      nginx, backend web server
10.0.0.103   web2.stars.org      nginx, backend web server
10.0.0.104   client.stars.org    client used to verify the services above

2. Backend Web Server Setup

Here I use a script to compile and install nginx as the backend web server. Run the script below on both web servers, then create a test page on each.
root@web1:~# vim install_all_nginx.sh
#!/bin/bash

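# Build parameters: source directory, nginx version and tarball name, install prefix, and number of parallel make jobs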
SRC_DIR=/usr/local/src
NGINX_URL=http://nginx.org/download/
NGINX_FILE=nginx-1.18.0
TAR=.tar.gz
NGINX_INSTALL_DIR=/apps/nginx
CPUS=`lscpu | awk '/^CPU\(s\)/{print $2}'`

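# Print a message followed by a colored [  OK  ] / [FAILED] / [WARNING] status tag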
color () {
    RES_COL=60
    MOVE_TO_COL="echo -en \\033[${RES_COL}G"
    SETCOLOR_SUCCESS="echo -en \\033[1;32m"
    SETCOLOR_FAILURE="echo -en \\033[1;31m"
    SETCOLOR_WARNING="echo -en \\033[1;33m"
    SETCOLOR_NORMAL="echo -en \E[0m"
    echo -n "$1" && $MOVE_TO_COL
    echo -n "["
    if [ $2 = "success" -o $2 = "0" ] ;then
        ${SETCOLOR_SUCCESS}
        echo -n $"  OK  "    
    elif [ $2 = "failure" -o $2 = "1"  ] ;then 
        ${SETCOLOR_FAILURE}
        echo -n $"FAILED"
    else
        ${SETCOLOR_WARNING}
        echo -n $"WARNING"
    fi
    ${SETCOLOR_NORMAL}
    echo -n "]"
    echo 
}

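# os_type / os_version: read the distribution name and version from /etc/os-release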
os_type () {
   awk -F'[ "]' '/^NAME/{print $2}' /etc/os-release
}

os_version () {
   awk -F'"' '/^VERSION_ID/{print $2}' /etc/os-release
}

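# Abort if nginx is already installed; otherwise make sure the source tarball is present, downloading it if necessary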
check () {
    [ -e ${NGINX_INSTALL_DIR} ] && { color "nginx is already installed, remove it before installing again" 1; exit; }
    cd  ${SRC_DIR}
    if [  -e ${NGINX_FILE}${TAR} ];then
        color "source package is already in place" 0
    else
        color 'downloading the nginx source package' 0
        wget ${NGINX_URL}${NGINX_FILE}${TAR} 
        [ $? -ne 0 ] && { color "failed to download ${NGINX_FILE}${TAR}" 1; exit; } 
    fi
} 

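# Create the nginx service account, install the build dependencies for the detected distro, compile nginx and register a systemd unit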
install () {
    color "开始安装 nginx" 0
    if id nginx  &> /dev/null;then
        color "nginx 用户已存在" 1 
    else
        useradd -s /sbin/nologin -r nginx
        color "创建 nginx 用户" 0 
    fi
    color "开始安装 nginx 依赖包" 0
    if [ `os_type` == "CentOS" -a `os_version` == '8' ] ;then
        yum -y -q install make gcc-c++ libtool pcre pcre-devel zlib zlib-devel openssl openssl-devel perl-ExtUtils-Embed 
    elif [ `os_type` == "CentOS" -a `os_version` == '7' ];then
        yum -y -q  install make gcc pcre-devel openssl-devel zlib-devel perl-ExtUtils-Embed
    else
        apt update &> /dev/null
        apt -y install make gcc libpcre3 libpcre3-dev openssl libssl-dev zlib1g-dev &> /dev/null
    fi
    cd $SRC_DIR
    tar xf ${NGINX_FILE}${TAR}
    NGINX_DIR=`echo ${NGINX_FILE}${TAR}| sed -nr 's/^(.*[0-9]).*/\1/p'`
    cd ${NGINX_DIR}
    ./configure --prefix=${NGINX_INSTALL_DIR} --user=nginx --group=nginx --with-http_ssl_module --with-http_v2_module --with-http_realip_module --with-http_stub_status_module --with-http_gzip_static_module --with-pcre --with-stream --with-stream_ssl_module --with-stream_realip_module 
    make -j $CPUS && make install 
    [ $? -eq 0 ] && color "nginx compiled and installed successfully" 0 ||  { color "nginx build/installation failed, exiting!" 1 ;exit; }
    echo "PATH=${NGINX_INSTALL_DIR}/sbin:${PATH}" > /etc/profile.d/nginx.sh
    cat > /lib/systemd/system/nginx.service <<EOF
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=${NGINX_INSTALL_DIR}/logs/nginx.pid
ExecStartPre=/bin/rm -f ${NGINX_INSTALL_DIR}/logs/nginx.pid
ExecStartPre=${NGINX_INSTALL_DIR}/sbin/nginx -t
ExecStart=${NGINX_INSTALL_DIR}/sbin/nginx
ExecReload=/bin/kill -s HUP \$MAINPID
KillSignal=SIGQUIT
LimitNOFILE=100000
TimeoutStopSec=5
KillMode=process
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable --now nginx &> /dev/null 
    systemctl is-active nginx &> /dev/null ||  { color "nginx failed to start, exiting!" 1 ; exit; }
    color "nginx installation finished" 0
}

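# Main flow: run the pre-checks, then compile and install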
check
install
root@web1:~# bash install_all_nginx.sh
root@web1:~# echo "Welcome to `hostname -I` test page" > /apps/nginx/html/index.html

#Verify the pages from the client
root@client:~# curl 10.0.0.102
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.103
Welcome to 10.0.0.103  test page

3. Implementing High Availability with keepalived

Both keepalived servers need the nginx and keepalived services installed. A keepalived cluster can provide high availability in two modes: a master/backup architecture and a master/master architecture. Both are demonstrated below.

3.1. Master/Backup Architecture

3.1.1. Install and Configure the nginx Service

For the installation I again use the script above to install nginx in one step. After it finishes, configure nginx to proxy and forward requests to the two backend web servers. Weights can also be added to the server entries in the upstream block to make verification easier.

ka1 node:
root@ka1:~# bash install_all_nginx.sh
root@ka1:~# vim /apps/nginx/conf/nginx.conf
#Add the following inside the http block; the server block can be adapted from the existing default one
    upstream ka_web {
        server 10.0.0.102:80 weight=2 max_fails=3 fail_timeout=30s;
        server 10.0.0.103:80 weight=1 max_fails=3 fail_timeout=30s;
    }
    server {
        listen       80;
        server_name  localhost;
        root /apps/nginx/html;
        location / {
            proxy_pass http://ka_web;
        }
root@ka1:~# nginx -t
nginx: the configuration file /apps/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /apps/nginx/conf/nginx.conf test is successful
root@ka1:~# nginx -s reload

ka2 node:
root@ka2:~# bash install_all_nginx.sh
root@ka2:~# vim /apps/nginx/conf/nginx.conf
    upstream ka_web {
        server 10.0.0.102:80 weight=1 max_fails=3 fail_timeout=30s;
        server 10.0.0.103:80 weight=2 max_fails=3 fail_timeout=30s;
    }
    server {
        listen       80;
        server_name  localhost;
        root /apps/nginx/html;
        location / {
            proxy_pass http://ka_web;
        }
root@ka2:~# nginx -t
nginx: the configuration file /apps/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /apps/nginx/conf/nginx.conf test is successful
root@ka2:~# nginx -s reload

Access verification test:
#On ka1, server 10.0.0.102 has the higher weight, so the result looks like this:
root@client:~# curl 10.0.0.100
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.100
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.100
Welcome to 10.0.0.103  test page

#On ka2, server 10.0.0.103 has the higher weight, so the result looks like this:
root@client:~# curl 10.0.0.101
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.101
Welcome to 10.0.0.103  test page
root@client:~# curl 10.0.0.101
Welcome to 10.0.0.103  test page
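
To see the 2:1 weight distribution more clearly than a handful of manual requests, a quick loop on the client can tally the responses. This is a minimal sketch under the configuration above; adjust the request count as needed:

root@client:~# for i in $(seq 1 60); do curl -s 10.0.0.100; done | sort | uniq -c
# With weight=2 on 10.0.0.102 in ka1's upstream, roughly two thirds of the 60
# responses should be the web1 page and one third the web2 page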

3.1.2. Install and Configure keepalived

Here keepalived is installed from the apt package. After installation the service is not running, because the keepalived.conf configuration file is missing and has to be created first.
ka1 node:
root@ka1:~# apt -y install keepalived
root@ka1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
root@ka1:~# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka1.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
root@ka1:~# systemctl restart keepalived.service

ka2 node:
root@ka2:~# apt -y install keepalived
root@ka2:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
root@ka2:~# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka2.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
root@ka2:~# systemctl restart keepalived.service
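
Before testing from the client, it can help to confirm that the two nodes are actually exchanging VRRP advertisements on the multicast group configured above (234.0.0.100). A minimal check with tcpdump, assuming tcpdump is installed on the node:

root@ka1:~# tcpdump -i eth0 -nn host 234.0.0.100
# Expect one VRRP advertisement per second (advert_int 1) from whichever node
# currently holds MASTER for virtual_router_id 88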

Client access test:
#In the earlier nginx configuration on ka1, server 10.0.0.102 was given weight=2
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page

3.1.3. Simulate a MASTER Node Failure

#Stopping the keepalived service on the MASTER node simulates a failure of the primary node. The VIP then floats over to the backup node, giving automatic VIP failover. Clients can still reach the backend services through the VIP; at this point the nginx proxy on the ka2 host handles the traffic.
ka1 node:
root@ka1:~# systemctl stop keepalived.service
root@ka1:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:5e:72:f9 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe5e:72f9/64 scope link 
       valid_lft forever preferred_lft forever

ka2 node:
root@ka2:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:e8:99:4d brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fee8:994d/64 scope link 
       valid_lft forever preferred_lft forever

Client access test:
#In the earlier nginx configuration on ka2, server 10.0.0.103 was given weight=2
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page

3.1.4. Restore the MASTER Node to the Cluster

When ka1 recovers and rejoins the cluster, its priority (100) is higher than ka2's (80), so checking the IP addresses now shows the VIP immediately floating back to ka1. This is preemptive mode, which is the default. It is also possible to delay the takeover for a while before the VIP is reclaimed; a configuration sketch for that follows at the end of this subsection.

ka1 node:
root@ka1:~# systemctl restart keepalived.service
root@ka1:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:5e:72:f9 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe5e:72f9/64 scope link 
       valid_lft forever preferred_lft forever

ka2 node:
root@ka2:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:e8:99:4d brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fee8:994d/64 scope link 
       valid_lft forever preferred_lft forever

Client access test:
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page
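
As mentioned above, the failback can be delayed instead of happening immediately. A minimal sketch of what that could look like on ka1, using keepalived's preempt_delay option (it only takes effect when the instance starts in state BACKUP; nopreempt could be used instead to disable preemption entirely):

vrrp_instance VI_1 {
    state BACKUP             # preempt_delay requires the initial state to be BACKUP
    interface eth0
    virtual_router_id 88
    priority 100             # still the highest priority in the cluster
    advert_int 1
    preempt_delay 60         # wait 60 seconds before taking the VIP back from a lower-priority MASTER
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}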

3.2. Master/Master Architecture

In the master/master architecture both ka1 and ka2 serve traffic at the same time, so clients can reach the corresponding backend servers through either node. When one node fails, its VIP still floats over to the healthy node.

3.2.1. keepalived Configuration Files

ka1 node:
root@ka1:~# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka1.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface eth0
    virtual_router_id 99
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314                                                                                                                                                                                      
    }
    virtual_ipaddress {
        10.0.0.201 dev eth0 label eth0:2
    }
}
root@ka1:~# systemctl restart keepalived.service
root@ka1:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:5e:72:f9 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe5e:72f9/64 scope link 
       valid_lft forever preferred_lft forever

ka2 node:
root@ka2:~# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka2.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface eth0
    virtual_router_id 99
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314                                                                                                                                                                                      
    }
    virtual_ipaddress {
        10.0.0.201 dev eth0 label eth0:2
    }
}
root@ka2:~# systemctl restart keepalived.service
root@ka2:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:e8:99:4d brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.201/32 scope global eth0:2
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fee8:994d/64 scope link 
       valid_lft forever preferred_lft forever

Client access test:
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page

root@client:~# curl 10.0.0.201
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.103  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.103  test page

3.2.2. Simulate a Failure on One Master Node

Now simulate a keepalived failure on ka2. Checking the IP addresses shows that ka2's VIP has floated over to ka1, and the backend services can still be reached through both VIPs.

ka2 node:
root@ka2:~# systemctl stop keepalived.service
root@ka2:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:e8:99:4d brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fee8:994d/64 scope link 
       valid_lft forever preferred_lft forever

ka1 node:
root@ka1:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:5e:72:f9 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet 10.0.0.201/32 scope global eth0:2
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe5e:72f9/64 scope link 
       valid_lft forever preferred_lft forever

Client access test:
#Now the nginx on ka1 does the proxying for both VIPs
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page

root@client:~# curl 10.0.0.201
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.103  test page

3.2.3. Restore the ka2 Node to the Cluster

ka2 node:
root@ka2:~# systemctl restart keepalived.service 
root@ka2:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:e8:99:4d brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.201/32 scope global eth0:2
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fee8:994d/64 scope link 
       valid_lft forever preferred_lft forever

ka1 node:
root@ka1:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:5e:72:f9 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe5e:72f9/64 scope link 
       valid_lft forever preferred_lft forever

Client access test:
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.103  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.102  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.103  test page
root@client:~# curl 10.0.0.201
Welcome to 10.0.0.103  test page

3.3. Using Sub-Configuration Files

#Create the directory that will hold the sub-configuration files
root@ka1:~# mkdir /etc/keepalived/conf.d
#Include the sub-configuration path in the main configuration file
root@ka1:~# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka1.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}
include /etc/keepalived/conf.d/*.conf

root@ka1:~# vim /etc/keepalived/conf.d/vrrp_200.conf
vrrp_instance VIP_200 {                                                                                                                                                                                         
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
root@ka1:~# vim /etc/keepalived/conf.d/vrrp_201.conf
vrrp_instance VIP_201 {                                                                                                                                                                                         
    state BACKUP
    interface eth0
    virtual_router_id 99
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.201 dev eth0 label eth0:2
    }
}
#Each vrrp_instance defined above is now a separate .conf file under /etc/keepalived/conf.d/; restarting the service is enough for them to take effect.
root@ka1:~# systemctl restart keepalived.service
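
ka2 can be converted to sub-configuration files in the same way. A minimal sketch of the mirrored files on ka2, assuming its main keepalived.conf also gets the include line (with router_id ka2.stars.org) and the roles and priorities are swapped as in section 3.2:

root@ka2:~# mkdir /etc/keepalived/conf.d
root@ka2:~# vim /etc/keepalived/conf.d/vrrp_200.conf
vrrp_instance VIP_200 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
root@ka2:~# vim /etc/keepalived/conf.d/vrrp_201.conf
vrrp_instance VIP_201 {
    state MASTER
    interface eth0
    virtual_router_id 99
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.201 dev eth0 label eth0:2
    }
}
root@ka2:~# systemctl restart keepalived.service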