HA Cluster Basics and Implementing HA with heartbeat
Environment
node1: 192.168.1.121  CentOS 6.7
node2: 192.168.1.122  CentOS 6.7
node3: 192.168.1.123  CentOS 6.7
VIP: 192.168.1.88
Preparation
# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.121 node1
192.168.1.122 node2
192.168.1.123 node3
# ssh-keygen -t rsa -P ''
# ssh-copy-id -i ~/.ssh/id_rsa.pub node1
# ssh-copy-id -i ~/.ssh/id_rsa.pub node2
# ssh-copy-id -i ~/.ssh/id_rsa.pub node3
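To confirm that passwordless SSH really works before handing things to ansible, a quick loop from node1 is enough (a minimal check, assuming the hostnames above resolve via /etc/hosts):
# for n in node1 node2 node3; do ssh $n 'hostname; date'; done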
# rpm -ivh epel-release-latest-6.noarch.rpm
# yum -y install ansible
# cat /etc/ansible/hosts
[ha]
192.168.1.121
192.168.1.122
192.168.1.123
# ansible ha -m copy -a 'src=/etc/hosts dest=/etc'
# ansible ha -m shell -a 'ntpdate 192.168.1.62'
# ansible ha -m cron -a 'minute="*/3" job="/usr/sbin/ntpdate 192.168.1.62" name="ntpdate"'
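Ansible can also confirm that the cron entry landed on every node and that the clocks are roughly in sync (a simple spot check):
# ansible ha -m shell -a 'crontab -l; date'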
# ansible ha -m copy -a 'src=/root/heartbeat2 dest=/root'
On node1, node2 and node3:
# yum -y install net-snmp-libs libnet PyXML perl-TimeDate
# ls heartbeat2/
heartbeat-2.1.4-12.el6.x86_64.rpm             # main heartbeat package
heartbeat-debuginfo-2.1.4-12.el6.x86_64.rpm
heartbeat-devel-2.1.4-12.el6.x86_64.rpm
heartbeat-gui-2.1.4-12.el6.x86_64.rpm         # provides hb_gui; can be installed later
heartbeat-ldirectord-2.1.4-12.el6.x86_64.rpm  # health checking of back-end real servers
heartbeat-stonith-2.1.4-12.el6.x86_64.rpm     # STONITH / node-fencing support
# cd heartbeat2/
# rpm -ivh heartbeat-2.1.4-12.el6.x86_64.rpm heartbeat-pils-2.1.4-12.el6.x86_64.rpm heartbeat-stonith-2.1.4-12.el6.x86_64.rpm
[root@node1 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/{ha.cf,haresources,authkeys} /etc/ha.d/
[root@node1 ha.d]# cd /etc/ha.d/
[root@node1 ha.d]# chmod 600 authkeys
[root@node1 ha.d]# openssl rand -base64 4
nuGXcw==
[root@node1 ha.d]# vim authkeys
Change
#auth 1
#2 sha1 HI!
to
auth 2
2 sha1 nuGXcw==
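heartbeat will not start if authkeys is readable by anyone but root, so it is worth re-checking the permissions and the active auth line before copying the file to the other nodes:
[root@node1 ha.d]# ls -l /etc/ha.d/authkeys
[root@node1 ha.d]# grep -v '^#' /etc/ha.d/authkeys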
[root@node1 ha.d]# vim ha.cf
Change
#logfile /var/log/ha-log
to
logfile /var/log/ha-log
Change
logfacility local0
to
#logfacility local0
Change
#mcast eth0 225.0.0.1 694 1 0
to
mcast eth0 225.23.190.1 694 1 0
Below the "node kathy" example (around line 212), add:
node node1    # list every node of the cluster
node node2
node node3
Below "#ping 10.10.10.254" (around line 223), add:
ping 192.168.1.1    # ping node used as a tie-breaker
Change
#compression bz2                 # whether to compress cluster messages
#compression_threshold 2         # minimum message size (in KB) worth compressing
to
compression bz2
compression_threshold 2
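After saving ha.cf, it helps to re-read only the directives that are actually in effect (a plain grep filter, nothing heartbeat-specific):
[root@node1 ha.d]# grep -Ev '^#|^$' /etc/ha.d/ha.cf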
[root@node1 ha.d]# vim haresources
Append at the end:
node1 192.168.1.80/16/eth0/192.168.255.255 httpd
[root@node1 ha.d]# scp -p authkeys ha.cf haresources node2:/etc/ha.d
[root@node1 ha.d]# scp -p authkeys ha.cf haresources node3:/etc/ha.d
[root@node1 ~]# vim /var/www/html/index.html
<h1>node1.magedu.com</h1>
[root@node1 ~]# service httpd stop
Stopping httpd: [ OK ]
[root@node1 ~]# chkconfig httpd off
[root@node2 ~]# vim /var/www/html/index.html
<h1>node2.magedu.com</h1>
[root@node2 ~]# service httpd stop
Stopping httpd: [ OK ]
[root@node2 ~]# chkconfig httpd off
[root@node3 ~]# vim /var/www/html/index.html
<h1>node3.magedu.com</h1>
[root@node3 ~]# service httpd stop
Stopping httpd: [ OK ]
[root@node3 ~]# chkconfig httpd off
[root@node1 ha.d]# ansible ha -m service -a 'name=heartbeat state=started'
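Once heartbeat is running, the node that owns the resources should hold the VIP from haresources on eth0 and have httpd started by the resource manager; a quick check (192.168.1.80 is the address configured above):
[root@node1 ha.d]# ansible ha -m shell -a 'ip addr show eth0'
[root@node1 ha.d]# curl http://192.168.1.80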
03 HA Cluster Concepts (Extended) and heartbeat Implementation
Put the local node into standby:
[root@node1 heartbeat]# /usr/lib64/heartbeat/hb_standby
Take the primary role back:
[root@node1 heartbeat]# /usr/lib64/heartbeat/hb_takeover
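While toggling hb_standby / hb_takeover it is handy to watch the service from any host that can reach the VIP (a simple polling loop; 192.168.1.80 is the VIP from haresources above):
# while :; do curl -s --connect-timeout 1 http://192.168.1.80; sleep 1; done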
[root@node3 ~]# mkdir /web/htdocs -pv
[root@node3 ~]# vim /web/htdocs/index.html
<h1>Page On NFS Server</h1>
[root@node3 ~]# vim /etc/exports
/web/htdocs 192.168.1.0/24(rw,no_root_squash)
[root@node3 ~]# service nfs start
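Before pointing a Filesystem resource at it, the export can be verified from the cluster nodes (showmount comes with nfs-utils):
[root@node1 ~]# showmount -e 192.168.1.123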
[root@node1 heartbeat]# service heartbeat stop;ssh node2 'service heartbeat stop'
[root@node1 ~]# cd /etc/ha.d/
[root@node1 ha.d]# vim haresources
Append at the end:
node1 192.168.1.80/16/eth0/192.168.255.255 Filesystem::192.168.1.123::/web/htdocs::/var/www/html::nfs httpd
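For reference, a rough breakdown of the heartbeat v1 haresources syntax used above (resource agent parameters are joined with "::"):
# <preferred-node>  <VIP/prefix/interface/broadcast>  Filesystem::<device>::<mountpoint>::<fstype>  <service>
# resources are started left to right and stopped in reverse order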
[root@node1 ha.d]# service heartbeat start;ssh node2 'service heartbeat start'
04 Building a Highly Available MySQL Cluster
[root@node1 ~]# service heartbeat stop;ssh node2 'service heartbeat stop'
[root@node1 ~]# cd /etc/ha.d
[root@node1 ha.d]# vim ha.cf
Add:
crm on
[root@node2 ~]# vim /etc/ha.d/ha.cf
Add:
crm on
[root@node1 ha.d]# cd /root/heartbeat2/
[root@node1 heartbeat2]# yum -y install pygtk2-libglade
[root@node1 heartbeat2]# rpm -ivh heartbeat-gui-2.1.4-12.el6.x86_64.rpm
[root@node2 ~]# yum -y install pygtk2-libglade
[root@node2 ~]# rpm -ivh heartbeat2/heartbeat-gui-2.1.4-12.el6.x86_64.rpm
[root@node1 ~]# ansible ha -m service -a 'name=heartbeat state=started'
Monitor the cluster:
[root@node1 ha.d]# crm_mon
[root@node1 ha.d]# ansible ha -m shell -a 'echo "mageedu" | passwd --stdin hacluster'    # default account for the GUI
[root@node1 ha.d]# hb_gui &    # launch the GUI
[root@node3 ~]# fdisk /dev/sda
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Selected partition 4
First cylinder (2898-15665, default 2898):
Using default value 2898
Last cylinder, +cylinders or +size{K,M,G} (2898-15665, default 15665): +30G
Command (m for help): t
Partition number (1-4): 4
Hex code (type L to list codes): 8e
Changed system type of partition 4 to 8e (Linux LVM)
Command (m for help): w
[root@node3 ~]# partx -a /dev/sda
BLKPG: Device or resource busy
error adding partition 1
BLKPG: Device or resource busy
error adding partition 2
BLKPG: Device or resource busy
error adding partition 3
BLKPG: Device or resource busy
error adding partition 4
[root@node3 ~]# pvcreate /dev/sda4
Physical volume "/dev/sda4" successfully created
[root@node3 ~]# vgcreate myvg /dev/sda4
Volume group "myvg" successfully created
[root@node3 ~]# lvcreate -L 10G -n mydata myvg
Logical volume "mydata" created.
[root@node3 ~]# mke2fs -t ext4 /dev/myvg/mydata
[root@node3 ~]# mkdir /mydata
[root@node3 ~]# vim /etc/fstab
Add:
/dev/myvg/mydata /mydata ext4 defaults 0 0
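A quick sanity check that the new fstab entry mounts cleanly (mount -a only mounts what is not already mounted):
[root@node3 ~]# mount -a
[root@node3 ~]# df -h /mydata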
[root@node3 ~]# vim /etc/exports
Add:
/mydata 192.168.1.0/24(rw,no_root_squash)
[root@node3 ~]# groupadd -r -g 306 mysql
[root@node3 ~]# useradd -r -g 306 -u 306 mysql
[root@node3 ~]# mkdir /mydata/data
[root@node3 ~]# chown -R mysql.mysql /mydata/data/
[root@node3 ~]# exportfs -arv
exporting 192.168.1.0/24:/mydata
exporting 192.168.1.0/24:/web/htdocs
[root@node1 ~]# mkdir /mydata
[root@node2 ~]# mkdir /mydata
[root@node1 ~]# mount -t nfs 192.168.1.123:/mydata /mydata/
[root@node1 ~]# groupadd -r -g 306 mysql
[root@node1 ~]# useradd -r -g 306 -u 306 mysql
[root@node1 ~]# su - mysql
-bash-4.1$ cd /mydata/data/
-bash-4.1$ touch a.txt
-bash-4.1$ ll
total 0
-rw-rw-r-- 1 mysql mysql 0 Sep 27 08:44 a.txt
-bash-4.1$ rm a.txt
bash-4.1$ exit
[root@node2 ~]# groupadd -r -g 306 mysql
[root@node2 ~]# useradd -r -g 306 -u 306 mysql
[root@node2 ~]# mount -t nfs 192.168.1.123:/mydata /mydata
[root@node2 ~]# su - mysql
-bash-4.1$ cd /mydata/data/
-bash-4.1$ touch b.txt
-bash-4.1$ exit
[root@node3 ~]# ll /mydata/data/
total 0
-rw-rw-r-- 1 mysql mysql 0 Sep 27 08:46 b.txt
[root@node2 ~]# touch /mydata/data/c.txt
[root@node2 ~]# ll /mydata/data/
total 0
-rw-rw-r-- 1 mysql mysql 0 Sep 27 08:46 b.txt
-rw-r--r-- 1 root root 0 Sep 27 08:50 c.txt
[root@node2 ~]# rm -f /mydata/data/*txt
Configure MySQL on node1
[root@node1 ~]# tar xf mariadb-5.5.43-linux-x86_64.tar.gz -C /usr/local/
[root@node1 ~]# cd /usr/local/
[root@node1 local]# ln -sv mariadb-5.5.43-linux-x86_64 mysql
[root@node1 local]# cd mysql
[root@node1 mysql]# chown -R root.mysql ./*
[root@node1 mysql]# ./scripts/mysql_install_db --datadir=/mydata/data/ --user=mysql
[root@node1 mysql]# mkdir /etc/mysql
[root@node1 mysql]# cp support-files/my-large.cnf /etc/mysql/my.cnf
[root@node1 mysql]# vim /etc/mysql/my.cnf
After "thread_concurrency = 8" (around line 41), add:
datadir = /mydata/data
innodb_file_per_table = on
skip_name_resolve = on
[root@node1 mysql]# cp support-files/mysql.server /etc/init.d/mysqld
[root@node1 mysql]# chkconfig --add mysqld
[root@node1 mysql]# service mysqld start
[root@node1 mysql]# /usr/local/mysql/bin/mysql
MariaDB [(none)]> create database mydb;
MariaDB [(none)]> \q
[root@node1 mysql]# service mysqld stop
[root@node1 mysql]# chkconfig mysqld off
[root@node1 ~]# umount /mydata
Configure MySQL on node2
[root@node2 ~]# tar xf mariadb-5.5.43-linux-x86_64.tar.gz -C /usr/local/
[root@node2 ~]# cd /usr/local/
[root@node2 local]# ln -sv mariadb-5.5.43-linux-x86_64 mysql
[root@node2 local]# cd mysql/
[root@node2 mysql]# chown -R root.mysql ./*
[root@node2 mysql]# mkdir /etc/mysql
[root@node1 ~]# scp /etc/mysql/my.cnf node2:/etc/mysql
[root@node2 mysql]# cp support-files/mysql.server /etc/init.d/mysqld
[root@node2 mysql]# chkconfig --add mysqld
[root@node2 mysql]# chkconfig mysqld off
[root@node2 mysql]# service mysqld start
[root@node2 mysql]# /usr/local/mysql/bin/mysql
MariaDB [(none)]> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mydb |
| mysql |
| performance_schema |
| test |
+--------------------+
5 rows in set (0.02 sec)
MariaDB [(none)]> \q
[root@node2 mysql]# service mysqld stop
[root@node2 mysql]# umount /mydata/
[root@node3 ~]# yum -y install mysql
[root@node1 ~]# hb_gui &
I. Create the myservices group and its resources in hb_gui
Add a myservices group:
    choose "group" as the type
    enter myservices as the group name
Add myip (the VIP) to the myservices group
Add mystore (the shared NFS filesystem) to the myservices group
Add myserver (the mysqld service) to the myservices group
Add the resources in that order.
Start the myservices group.
Test 1: myip, mystore, myserver
myip:
[root@node2 mysql]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:88:49:f0 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.122/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.88/24 brd 192.168.1.255 scope global secondary eth0
inet6 fe80::20c:29ff:fe88:49f0/64 scope link
valid_lft forever preferred_lft forever
Result: success
mystore:
[root@node2 mysql]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
//192.168.1.31/software on /mnt type cifs (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
192.168.1.123:/mydata on /mydata type nfs (rw,vers=4,addr=192.168.1.123,clientaddr=192.168.1.122)
Result: success
myserver:
[root@node2 mysql]# service mysqld status
MySQL running (21693) [ OK ]
Result: success
Test 2: put node2 into standby
myip, mystore and myserver all fail over successfully
[root@node1 ~]# /usr/local/mysql/bin/mysql
MariaDB [(none)]> grant all on *.* to 'root'@'192.168.1.%' identified by 'mageedu' ;
MariaDB [(none)]> flush privileges;
[root@node3 ~]# mysql -uroot -p -h192.168.1.88
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mydb |
| mysql |
| performance_schema |
| test |
+--------------------+
mysql> use mydb;
mysql> create table t1(id int,name char(30));
mysql> show tables;
+----------------+
| Tables_in_mydb |
+----------------+
| t1 |
+----------------+
mysql> \q
Log in to MySQL from node3 again:
[root@node3 ~]# mysql -uroot -p -h192.168.1.88
Now bring node2 back online (out of standby) and test MySQL from node3.
Result: the MySQL session stays connected and the data is intact.
05 Highly Available ipvs with heartbeat
[root@node1 ~]# service heartbeat stop
[root@node2 ~]# service heartbeat stop
[root@node1 ~]# cd heartbeat2/
[root@node1 heartbeat2]# yum -y install ipvsadm perl-Mail-Send
[root@node1 heartbeat2]# yum localinstall heartbeat-ldirectord-2.1.4-12.el6.x86_64.rpm -y
[root@node2 ~]# cd heartbeat2/
[root@node2 heartbeat2]# yum localinstall heartbeat-ldirectord-2.1.4-12.el6.x86_64.rpm -y
[root@node3 ~]# vim rs.sh
#!/bin/bash
# LVS-DR real server setup: bind the VIP on lo:0 and suppress ARP for it,
# so that only the director answers ARP requests for the VIP.
vip=192.168.1.88
interface="lo:0"

case $1 in
start)
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/eth0/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/eth0/arp_announce
    ifconfig $interface $vip broadcast $vip netmask 255.255.255.255 up
    route add -host $vip dev $interface
    ;;
stop)
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/eth0/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/eth0/arp_announce
    ifconfig $interface down
    ;;
status)
    if ifconfig lo:0 | grep $vip &> /dev/null; then
        echo "ipvs is running."
    else
        echo "ipvs is stopped."
    fi
    ;;
*)
    echo "Usage: `basename $0` {start|stop|status}"
    exit 1
    ;;
esac
[root@node3 ~]# chmod +x rs.sh
[root@node3 ~]# ./rs.sh start
[root@node3 ~]# echo "<h1>Node3</h1>" > /var/www/html/index.html
[root@node3 ~]# service httpd start
[root@node3 ~]# curl http://192.168.1.123
<h1>Node3</h1>
[root@node1 ~]# ifconfig eth0:0 192.168.1.88 broadcast 192.168.1.88 netmask 255.255.255.255 up
[root@node1 ~]# route add -host 192.168.1.88 dev eth0:0
[root@node1 ~]# ipvsadm -A -t 192.168.1.88:80 -s rr
[root@node1 ~]# ipvsadm -a -t 192.168.1.88:80 -r 192.168.1.123 -g -w 1
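At this point the virtual service and the real server should be visible in the kernel's ipvs table, which confirms the LVS-DR plumbing works before handing control to ldirectord:
[root@node1 ~]# ipvsadm -L -n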
[root@node1 ~]# ipvsadm -C
[root@node1 ~]# route del -host 192.168.1.88
[root@node1 ~]# ifconfig eth0:0 down
[root@node2 ~]# ifconfig eth0:0 192.168.1.88 broadcast 192.168.1.88 netmask 255.255.255.255 up
[root@node2 ~]# route add -host 192.168.1.88 dev eth0:0
[root@node2 ~]# ipvsadm -A -t 192.168.1.88:80 -s rr
[root@node2 ~]# ipvsadm -a -t 192.168.1.88:80 -r 192.168.1.123 -g -w 1
[root@node2 ~]# ipvsadm -C
[root@node2 ~]# route del -host 192.168.1.88
[root@node2 ~]# ifconfig eth0:0 down
[root@node2 ~]# cd /usr/share/doc/heartbeat-ldirectord-2.1.4/
[root@node2 heartbeat-ldirectord-2.1.4]# cp ldirectord.cf /etc/ha.d/
[root@node2 heartbeat-ldirectord-2.1.4]# cd /etc/ha.d/
[root@node1 ~]# cp /usr/share/doc/heartbeat-ldirectord-2.1.4/ldirectord.cf /etc/ha.d/
[root@node1 ~]# cd /etc/ha.d/
[root@node1 ha.d]# vim ldirectord.cf
Change the section under "# Sample for an http virtual service" (around line 23) to:
virtual=192.168.1.88:80
        real=192.168.1.122:80 gate
        fallback=127.0.0.1:80 gate
        service=http
        request=".health.html"
        receive="OK"
        scheduler=rr
        #persistent=600
        #netmask=255.255.255.255
        protocol=tcp
        checktype=negotiate
        checkport=80
[root@node1 ha.d]# vim /var/www/html/index.html
<h1>Sorry Server</h1>
[root@node1 ~]# service httpd start
[root@node1 ~]# service heartbeat start
[root@node1 ~]# chkconfig httpd on
[root@node2 ~]# vim /var/www/html/index.html
<h1>Sorry server </h1>
[root@node2 ~]# service httpd start
[root@node2 ~]# chkconfig httpd on
[root@node2 ~]# service heartbeat start
[root@node1 ha.d]# scp ldirectord.cf node2:/etc/ha.d
[root@node3 ~]# vim /var/www/html/.health.html
OK
[root@node1 ~]# hb_gui
Add a resource group named ipvs:
1) add clusterip
2) add ipvsrules
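With the ipvs group started, whichever director currently holds it should show an ipvs table populated by ldirectord, and the health-check URL on the real server should answer with "OK" (node1 shown here; run on the active director):
[root@node1 ~]# ipvsadm -L -n
[root@node1 ~]# curl http://192.168.1.123/.health.html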
[root@node3 ~]# curl 192.168.1.88
<h1>Node3</h1>
[root@node3 ~]# cd /var/www/html/
[root@node3 html]# mv .health.html a.html
Test result: not successful.
Stop here; the MySQL part that follows is the same as the previous section and can be skipped.