RabbitMQ High-Availability Cluster Installation and Deployment Guide

Architecture Diagram

 

(Architecture diagram image)

 

 

1) Synchronization of RabbitMQ cluster metadata

A RabbitMQ cluster always keeps four types of internal metadata (similar to an index) in sync:
a. Queue metadata: queue names and their attributes;
b. Exchange metadata: exchange names, types, and attributes;
c. Binding metadata: a simple table describing how messages are routed to queues;
d. vhost metadata: provides namespaces and security attributes for the queues, exchanges, and bindings inside a vhost.
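As a quick way to inspect this metadata on any running node, the standard rabbitmqctl list commands can be used; the columns requested below are just a selection:
# rabbitmqctl list_vhosts
# rabbitmqctl list_queues -p / name durable auto_delete
# rabbitmqctl list_exchanges -p / name type durable
# rabbitmqctl list_bindings -p / source_name destination_name routing_key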

2) Cluster configuration mode

cluster: does not work across network segments and is intended for nodes on the same LAN; nodes can be added or removed dynamically; all nodes must run the same versions of RabbitMQ and Erlang.

Node types

RAM node: a RAM node keeps all metadata definitions for queues, exchanges, bindings, users, permissions, and vhosts in memory. The benefit is that operations such as declaring exchanges and queues are faster.

Disk node: stores the metadata on disk. A single-node cluster only allows disk nodes, so that the system configuration is not lost when RabbitMQ restarts.

Recommended approach: configure at least two disk nodes, so that as long as one of them is available, metadata changes can still be persisted.
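For reference, rabbitmqctl cluster_status groups the members by node type; on RabbitMQ 3.6.x the output contains a tuple along these lines (the node names are the ones used later in this document, and which nodes are disc or ram depends on how each node was joined):
{nodes,[{disc,['rabbit@rabbitmq-node1','rabbit@rabbitmq-node2']},
        {ram,['rabbit@rabbitmq-node3']}]}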

Erlang Cookie

The Erlang cookie is the secret key that allows nodes to communicate with each other; all nodes in a cluster must share the same Erlang cookie.

3) Components required to build a RabbitMQ cluster

a. JDK 1.8
b. Erlang runtime environment
c. RabbitMQ server component

 

1. Install the EPEL yum repository

# rpm -Uvh https://download.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
2. Install Erlang
# yum -y install erlang
3. Configure the Java environment in /etc/profile
JAVA_HOME=/usr/local/java/jdk1.8.0_151
PATH=$JAVA_HOME/bin:$PATH
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME PATH CLASSPATH
4. Install and configure RabbitMQ
# tar -xf rabbitmq-server-generic-unix-3.6.15.tar -C /usr/local/
# mv /usr/local/rabbitmq_server-3.6.15/ /usr/local/rabbitmq
 
5. Configure the RabbitMQ environment variables in /etc/profile
RABBITMQ_HOME=/usr/local/rabbitmq
PATH=$PATH:$RABBITMQ_HOME/sbin
export RABBITMQ_HOME PATH
# source /etc/profile
6. Edit the hosts file /etc/hosts
192.168.2.208 rabbitmq-node1
192.168.2.41 rabbitmq-node2
192.168.2.40 rabbitmq-node3
Keep these hosts entries identical on every node.
The Erlang cookie file /root/.erlang.cookie must also be identical on all nodes.
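A minimal sketch of distributing the cookie from node1 to the other nodes over SSH (the cookie must be owned by the user running RabbitMQ and readable only by that user; restart RabbitMQ after changing it):
# scp /root/.erlang.cookie root@rabbitmq-node2:/root/.erlang.cookie
# scp /root/.erlang.cookie root@rabbitmq-node3:/root/.erlang.cookie
# chmod 400 /root/.erlang.cookie    (run on every node)
# chown root:root /root/.erlang.cookie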
 
7. Start RabbitMQ in the background
# /usr/local/rabbitmq/sbin/rabbitmq-server -detached
Add a user
# rabbitmqctl add_user admin admin
Tag the user as administrator and grant permissions
# rabbitmqctl set_user_tags admin administrator
# rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"
Enable the management plugin so the RabbitMQ web UI can be used
# rabbitmq-plugins enable rabbitmq_management
List users
# rabbitmqctl list_users
Check node status
# rabbitmqctl status
Check cluster status
# rabbitmqctl cluster_status
List plugins
# rabbitmq-plugins list
Add firewall rules to /etc/sysconfig/iptables (4369 is epmd, 5672 is AMQP, 15672 is the management UI, 25672 is inter-node communication)
-A INPUT -m state --state NEW -m tcp -p tcp --dport 4369 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 5672 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 15672 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 25672 -j ACCEPT
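After editing the rules, reload the firewall (assuming the iptables-services package manages the firewall on these CentOS 7 hosts):
# systemctl restart iptables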
8. Join nodes to the cluster (run on the joining node; two nodes are clustered in this setup)
# rabbitmqctl stop_app
# rabbitmqctl join_cluster  --ram rabbit@rabbitmq-node2
or
# rabbitmqctl join_cluster  rabbit@rabbitmq-node2
# rabbitmqctl change_cluster_node_type  ram
Start the node
# rabbitmqctl start_app
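The join can then be verified from any node; with the hostnames used above, the running_nodes entry of the output should list every cluster member:
# rabbitmqctl cluster_status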
9. Remove a node from the cluster (run on the node being removed)
1.  rabbitmq-server -detached
2. rabbitmqctl stop_app
3. rabbitmqctl reset 
4. rabbitmqctl start_app
Log in to the management UI to check the cluster status: http://IP:15672/


Configure a mirrored queue policy

In the web UI, after logging in, click "Admin" and then "Virtual Hosts" (on the right side of the page). At the bottom of that page, under "Add a new virtual host", add a new virtual host, and grant both the "admin" and "guest" users permissions on it.
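The same can be done from the command line; a minimal sketch, assuming the new vhost is named hasystem (the name used in the policy command below):
# rabbitmqctl add_vhost hasystem
# rabbitmqctl set_permissions -p hasystem admin ".*" ".*" ".*"
# rabbitmqctl set_permissions -p hasystem guest ".*" ".*" ".*"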


# rabbitmqctl set_policy -p hasystem ha-allqueue "^" '{"ha-mode":"all"}' -n rabbit
"hasystem" vhost名称, "^"匹配所有的队列, ha-allqueue 策略名称为ha-all, '{"ha-mode":"all"}' 策略模式为 all 即复制到所有节点,包含新增节点,则此时镜像队列设置成功.
rabbitmqctl set_policy [-p Vhost] Name Pattern Definition [Priority]
-p Vhost: optional, applies the policy to queues in the given vhost
Name: the policy name
Pattern: a regular expression matching queue names
Definition: the mirroring definition, made up of ha-mode, ha-params, and ha-sync-mode
ha-mode: the mirroring mode; valid values are all / exactly / nodes
all: mirror on every node in the cluster
exactly: mirror on a fixed number of nodes, the count given by ha-params
nodes: mirror on the named nodes, the node names given by ha-params
ha-params: the parameter required by the chosen ha-mode
ha-sync-mode: how messages in the queue are synchronized; valid values are automatic and manual
Priority: optional, the policy's priority
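To check that the policy took effect, or to mirror to a fixed number of nodes rather than all of them, something along these lines can be used (the ha-two policy name is just an illustration):
# rabbitmqctl list_policies -p hasystem
# rabbitmqctl set_policy -p hasystem ha-two "^" '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}'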
Note: the cluster configuration is now complete.

High-availability (HA) configuration

HAProxy provides load balancing and Keepalived provides health checking and failover; install and configure both HA services as follows.

Extract the archive

# tar -zxf haproxy-1.8.17.tar.gz
Check the kernel version
# uname -r
# yum -y install gcc gcc-c++ make
Change into the extracted directory and build; TARGET=linux2628 covers kernels >= 2.6.28, including the 3.10 kernel on CentOS 7
# make TARGET=linux2628 PREFIX=/usr/local/haproxy
# make install PREFIX=/usr/local/haproxy
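To confirm the build and installation, print the version:
# /usr/local/haproxy/sbin/haproxy -v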
Create the configuration directories and a dedicated haproxy user
# mkdir /usr/local/haproxy/conf
# mkdir /var/lib/haproxy/
# touch /usr/local/haproxy/conf/haproxy.cfg
# groupadd haproxy
# useradd haproxy -g haproxy
# chown -R haproxy.haproxy /usr/local/haproxy
# chown -R haproxy.haproxy  /var/lib/haproxy

Configuration file /usr/local/haproxy/conf/haproxy.cfg
global
    log         127.0.0.1 local2
 
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
# RabbitMQ management UI frontend
listen rabbitmq_admin
    bind 0.0.0.0:8300
    server rabbitmq-node1 192.168.2.208:15672
    server rabbitmq-node2 192.168.2.41:15672
    server rabbitmq-node3 192.168.2.40:15672
# RabbitMQ cluster AMQP proxy
listen rabbitmq_local_cluster
    bind 0.0.0.0:8200
    # TCP mode for AMQP traffic
    mode tcp
    option tcplog
    # simple round-robin balancing
    balance roundrobin
    # RabbitMQ cluster nodes
    server rabbitmq-node1 192.168.2.208:5672 check inter 5000 rise 2 fall 2
    server rabbitmq-node2 192.168.2.41:5672 check inter 5000 rise 2 fall 2
    server rabbitmq-node3 192.168.2.40:5672 check inter 5000 rise 2 fall 2
# HAProxy web statistics page
listen private_monitoring
    bind 0.0.0.0:8100
    mode http
    option httplog
    stats enable
    # statistics are served at http://<host>:8100/stats
    stats uri /stats
    stats refresh 30s
    # username/password for the stats page
    stats auth admin:admin
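The global section sends logs to 127.0.0.1 on the local2 facility, so rsyslog must accept UDP syslog and route that facility to a file; a minimal sketch (the file name haproxy.conf is an assumption):
Contents of /etc/rsyslog.d/haproxy.conf:
$ModLoad imudp
$UDPServerRun 514
local2.*    /var/log/haproxy.log
Then restart rsyslog:
# systemctl restart rsyslog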
 
Start the HAProxy service
# /usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg
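The configuration file can also be syntax-checked with the -c flag before starting or after any change:
# /usr/local/haproxy/sbin/haproxy -c -f /usr/local/haproxy/conf/haproxy.cfg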
# Keepalived source installation
Source tarball location: /usr/local/src
Install prefix: /usr/local/keepalived
Configuration file: /etc/keepalived/keepalived.conf
# tar -zxf keepalived-2.0.10.tar.gz
Install build dependencies
# yum -y install openssl-devel libnl libnl-devel libnfnetlink-devel
Configure, build, and install
# ./configure --prefix=/usr/local/keepalived && make && make install
Create the keepalived configuration directory
# mkdir /etc/keepalived
Copy the configuration file into /etc/keepalived
# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
Copy the keepalived init script to /etc/init.d/
# cp /usr/local/src/keepalived-2.0.10/keepalived/etc/init.d/keepalived /etc/init.d/
Copy the keepalived sysconfig file to /etc/sysconfig/
# cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
Create a symlink for the binary
# ln -s /usr/local/keepalived/sbin/keepalived /sbin/
Enable keepalived at boot
# chkconfig keepalived on
Check the service status
# systemctl status keepalived
Start keepalived
# systemctl start keepalived
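Once keepalived is running on the master, the virtual IP should appear on the tracked interface (ens33, as configured below); a quick check:
# ip addr show ens33 | grep inet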
Master configuration file /etc/keepalived/keepalived.conf
# Master:
global_defs {
 notification_email {
 13486157333m@sina.cn
 }
 notification_email_from 13486157333m@sina.cn
 smtp_server 127.0.0.1
 smtp_connect_timeout 30
 router_id NGINX_DEVEL
}
vrrp_script chk_haproxy {
 script "/usr/local/keepalived/check_haproxy.sh" 
 interval 2 
 weight 2
 fall 3
 rise 2 
}
vrrp_instance haproxy_1 {
 state MASTER 
 interface ens33
 virtual_router_id 104
 priority 150 
 advert_int 1
 mcast_src_ip 192.168.2.41
authentication {
 auth_type PASS
 auth_pass 1111
 }
track_interface {
 ens33
 }
track_script {
 chk_haproxy    # must match the vrrp_script name
 }
virtual_ipaddress {
 192.168.2.246    # VIP; must be identical on master and backup
 }
}
# Example LVS virtual_server block (not used in this setup, kept commented out for reference):
#virtual_server 192.168.2.110 80 {
#    delay_loop 6                  # health check interval, in seconds
#    lb_algo wrr                   # scheduling algorithm: weighted round robin
#    lb_kind DR                    # LVS forwarding mode: NAT, TUN, or DR
#    nat_mask 255.255.255.0
#    persistence_timeout 0
#    protocol TCP
#    real_server 192.168.220.128 80 {   # real server IP address
#        weight 3                       # node weight; the higher the value, the more traffic
#        TCP_CHECK {
#            connect_timeout 10
#            nb_get_retry 3
#            delay_before_retry 3
#            connect_port 80
#        }
#    }
#}
Backup (slave) configuration file /etc/keepalived/keepalived.conf
# Backup:
global_defs {
 notification_email {
 13486157333m@sina.cn
 }
 notification_email_from 13486157333m@sina.cn
 smtp_server 127.0.0.1
 smtp_connect_timeout 30
 router_id NGINX_DEVEL
}
vrrp_script chk_haproxy {
 script "/usr/local/keepalived/check_haproxy.sh" 
 interval 2 
 weight 2
 fall 3
 rise 2 
}
vrrp_instance haproxy_2 {
 state BACKUP
 interface ens33
 virtual_router_id 104
 priority 100    # lower than the master's priority of 150
 advert_int 1
 mcast_src_ip 192.168.2.208
authentication {
 auth_type PASS
 auth_pass 1111
 }
track_interface {
 ens33
 }
track_script {
 chk_haproxy    # must match the vrrp_script name
 }
virtual_ipaddress {
 192.168.2.246
 }
}
# Example LVS virtual_server block (not used in this setup, kept commented out for reference):
#virtual_server 192.168.2.110 80 {
#    delay_loop 6                  # health check interval, in seconds
#    lb_algo wrr                   # scheduling algorithm: weighted round robin
#    lb_kind DR                    # LVS forwarding mode: NAT, TUN, or DR
#    nat_mask 255.255.255.0
#    persistence_timeout 0
#    protocol TCP
#    real_server 192.168.220.128 80 {   # real server IP address
#        weight 3                       # node weight; the higher the value, the more traffic
#        TCP_CHECK {
#            connect_timeout 10
#            nb_get_retry 3
#            delay_before_retry 3
#            connect_port 80
#        }
#    }
#}
HAProxy health-check script /usr/local/keepalived/check_haproxy.sh
#!/bin/bash
# If HAProxy is not running, try to start it; if it still is not running
# after 3 seconds, stop keepalived so the VIP fails over to the other node.
HaproxyStatus=`ps -C haproxy --no-header | wc -l`
if [ "$HaproxyStatus" -eq 0 ];then
    /usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg
    sleep 3
    if [ `ps -C haproxy --no-header | wc -l` -eq 0 ];then
        systemctl stop keepalived
    fi
fi
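Make the script executable on both nodes (same path as referenced in the vrrp_script block):
# chmod +x /usr/local/keepalived/check_haproxy.sh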