ActiveMQ Cluster Environment

role          IP           hostname
master        10.0.0.4     cdh1
slave 1       10.0.0.5     cdh2
slave 2       10.0.0.6     cdh3


1. Install JDK on your Server

Note: Perform the same steps on each node (including cdh1, cdh2, and cdh3)

To elevate permissions, run command:

[azureuser@cdh1 ~]$ sudo su -

step 1: Download and install JDK 7

[root@cdh1 ~]# rpm -qa|grep jdk|xargs rpm -e --nodeps
[root@cdh1 ~]# wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/7u67-b01/jdk-7u67-linux-x64.tar.gz
[root@cdh1 ~]# tar zxvf jdk-7u67-linux-x64.tar.gz -C /opt/

step 2: Configure the environment variables

cat >/etc/profile.d/java.sh<<EOF
export JAVA_HOME=/opt/jdk1.7.0_67
export PATH=\$PATH:\$JAVA_HOME/bin
EOF

step 3: Apply the changes immediately

[root@cdh1 ~]# source /etc/profile.d/java.sh

[root@cdh1 ~]# echo "10.0.0.4 cdh1" >>/etc/hosts
[root@cdh1 ~]# echo "10.0.0.5 cdh2" >>/etc/hosts
[root@cdh1 ~]# echo "10.0.0.6 cdh3" >>/etc/hosts




2. Install the Cloudera CDH 5 repository

[root@cdh1 ~]# curl -LO http://archive.cloudera.com/cdh5/one-click-install/redhat/6/x86_64/cloudera-cdh-5-0.x86_64.rpm
 
[root@cdh1 ~]# yum localinstall cloudera-cdh-5-0.x86_64.rpm -y
[root@cdh1 ~]# yum clean all -y
[root@cdh1 ~]# yum repolist
 
[root@cdh1 ~]# rpm --import http://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera
 



3. Install and configure ZooKeeper on each node

[root@cdh1 ~]# yum install zookeeper* -y
 
cat >/etc/zookeeper/conf/zoo.cfg <<EOF
maxClientCnxns=50
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/var/lib/zookeeper
clientPort=2181
server.1=cdh1:2888:3888
server.2=cdh2:2888:3888
server.3=cdh3:2888:3888
EOF
 
on cdh1
[root@cdh1 ~]# /etc/init.d/zookeeper-server init --myid=1 && /etc/init.d/zookeeper-server start

on cdh2
[root@cdh2 ~]# /etc/init.d/zookeeper-server init --myid=2 && /etc/init.d/zookeeper-server start

on cdh3
[root@cdh3 ~]# /etc/init.d/zookeeper-server init --myid=3 && /etc/init.d/zookeeper-server start



Verify the ZooKeeper ensemble by connecting from another node:

[root@cdh2 ~]# zookeeper-client -server cdh1:2181



5. Install ActiveMQ on each node

step 1: Download the latest ActiveMQ release
[root@cdh1 ~]# wget http://www.us.apache.org/dist/activemq/5.11.1/apache-activemq-5.11.1-bin.tar.gz
 
step 2: Extract the archive

[root@cdh1 ~]# tar zxvf apache-activemq-5.11.1-bin.tar.gz -C /opt/ 

[root@cdh1 ~]# cd /opt/apache-activemq-5.11.1/conf




  6. 1.  Before modification, the default persistence section of activemq.xml:

    <persistenceAdapter>
        <kahaDB directory="${activemq.data}/kahadb"/>
    </persistenceAdapter>


  6. 2.  After modification, replace the persistenceAdapter section of activemq.xml with:

  Note: Each node of the hostname must be different.

 

    <persistenceAdapter>
        <replicatedLevelDB
            directory="${activemq.data}/leveldb"
            replicas="3"
            bind="tcp://0.0.0.0:0"
            zkAddress="cdh1:2181,cdh2:2181,cdh3:2181"
            hostname="cdh1"
            sync="local_disk"
            zkPath="/activemq/leveldb-stores"
            />
    </persistenceAdapter>

7. Testing ActiveMQ high availability

step 1: Start ActiveMQ on each node
[root@cdh1 ~]# cd /opt/apache-activemq-5.11.1/

[root@cdh1 apache-activemq-5.11.1]# ./bin/activemq start

step 2: Verify the service on each node
Note: Only the cdh1 node (the elected master) is listening on port 61616
[root@cdh1 ~]# netstat -tulpn | grep 61616


step 3: Stop ActiveMQ on cdh1
[root@cdh1 ~]# cd /opt/apache-activemq-5.11.1/
 
[root@cdh1 apache-activemq-5.11.1]# ./bin/activemq stop
step 4: Compare the output of cdh1 and cdh2.
Note: we can see cdh2 node has taken over the cdh1's work.