实验环境

centos7_x64

192.168.10.18    hadoop_master/zk1/hbase_master

192.168.10.10    hadoop_node1/zk2/hbase_node1


实验软件

jdk-8u172-linux-x64.tar.gz 

hadoop-2.6.4.tar.gz 

apache-zookeeper-3.5.8-bin.tar.gz

hbase-2.2.5-bin.tar.gz


软件安装

hostnamectl set-hostname  master &&  bash   hadoop_master执行

hostnamectl set-hostname node    &&   bash   hadoop_slave执行


cp -pv /etc/hosts /etc/hosts.bak

cat >> /etc/hosts <<  EOF

> 192.168.10.18  master

> 192.168.10.10  node

EOF 



配置ssh免密登入

ssh-keygen -t rsa       

ssh-copy-id -i ~/.ssh/id_rsa.pub  root@192.168.10.10

ssh root@192.168.10.10 'systemctl restart chronyd && systemctl enable chronyd && hwclock -w'     


配置java

tar zxvf /root/jdk-8u172-linux-x64.tar.gz  &&   mv /root/jdk1.8.0_172 /usr/local/java

cp -pv /etc/profile /etc/profile.bak

cat >>  /etc/profile  <<  'EOF'

> export JAVA_HOME=/usr/local/java

> export PATH=$PATH:$JAVA_HOME/bin

> export HADOOP_HOME=/usr/local/{hadoop_master,hadoop_node1}

> export PATH=$PATH:$HADOOP_HOME/bin

> export HBASE_HOME=/usr/local/{hbase_master,hbase_node1}

> export PATH=$PATH:$HBASE_HOME/bin

EOF && source /etc/profile && java -version

java version "1.8.0_172"   hadoop_master/slave操作


配置hadoop_master

groupadd hadoop  &&  useradd hadoop -g hadoop

mkdir -pv /usr/local/hadoop_master/{data,logs}

chown -R hadoop:hadoop /usr/local/hadoop_master


tar zxvf hadoop-2.6.4.tar.gz   &&  mv /root/hadoop-2.6.4  /usr/local/hadoop_master

cd /usr/local/hadoop_master/etc/hadoop

cp -pv hadoop-env.sh  hadoop-env.sh.bak

cat hadoop-env.sh | grep JAVA_HOME

export JAVA_HOME=/usr/local/java    hadoop_master操作


cp -pv  core-site.xml  core-site.xml.bak

cat core-site.xml

<configuration>

   <property>

       <name>fs.defaultFS</name>

       <value>hdfs://master:9000</value>

   </property>


   <property>

       <name>hadoop.tmp.dir</name>

       <value>/usr/local/hadoop_master/data</value>

   </property>

</configuration>


cp -pv   hdfs-site.xml hdfs-site.xml.bak

cat hdfs-site.xml

<configuration>

   <property>

       <name>dfs.replication</name>

       <value>2</value>       副本数

   </property>

</configuration>


cp -pv  mapred-site.xml  mapred-site.xml.bak

cat mapred-site.xml

<configuration>

   <property>

       <name>mapreduce.framework.name</name>

       <value>yarn</value>

   </property>

</configuration>


cp -pv yarn-site.xml yarn-site.xml.bak

cat yarn-site.xml

<configuration>

   <property>

      <name>yarn.resourcemanager.hostname</name>

      <value>master</value>

   </property>


   <property>

      <name>yarn.nodemanager.aux-services</name>

      <value>mapreduce_shuffle</value>

   </property>

</configuration>

     

cp  -pv slaves slaves.bak  

echo master > slaves  &&   echo node >>  slaves


配置hadoop_slave

groupadd hadoop  &&  useradd hadoop -g hadoop

mkdir -pv /usr/local/hadoop_node1/{data,logs}

chown -R hadoop:hadoop /usr/local/hadoop_node1


tar zxvf hadoop-2.6.4.tar.gz   &&  mv /root/hadoop-2.6.4 /usr/local/hadoop_node1

cd /usr/local/hadoop_node1/etc/hadoop

cp -pv hadoop-env.sh  hadoop-env.sh.bak

cat hadoop-env.sh | grep JAVA_HOME

export JAVA_HOME=/usr/local/java    hadoop_slave操作


cp -pv core-site.xml  core-site.xml.bak

cat core-site.xml

<configuration>

   <property>

       <name>fs.defaultFS</name>

       <value>hdfs://master:9000</value>       fs.defaultFS 必须指向 NameNode(master)

   </property>

   <property>

       <name>hadoop.tmp.dir</name>

       <value>/usr/local/hadoop_node1/data</value>

   </property>

</configuration>


cp -pv yarn-site.xml yarn-site.xml.bak

cat yarn-site.xml

<configuration>

   <property>

      <name>yarn.resourcemanager.hostname</name>

      <value>master</value>       ResourceManager 运行在 master

   </property>

   <property>

      <name>yarn.nodemanager.aux-services</name>

      <value>mapreduce_shuffle</value>

   </property>

</configuration>

其他配置文件 和 hadoop_master 相同


/usr/local/hadoop_master/bin/hadoop   namenode  -format     仅在 master(NameNode) 上执行格式化

bash  /usr/local/{hadoop_master,hadoop_node1}/sbin/start-all.sh  &

初始化   hadoop_master/node操作 


jps

11504 QuorumPeerMain

12197 ResourceManager

12523 Jps

11806 NameNode

12063 SecondaryNameNode

11919 DataNode


配置zookeeper

tar zxvf  apache-zookeeper-3.5.8-bin.tar.gz &&  mv apache-zookeeper-3.5.8-bin /usr/local/{zk1,zk2}

mkdir -pv /usr/local/{zk1,zk2}/data  


cd  /usr/local/{zk1,zk2}/conf

cp -pv zoo_sample.cfg   zoo_sample.cfg.bak   

mv   zoo_sample.cfg    zoo.cfg

echo 1 > /usr/local/zk1/data/myid        群集id 不可相同


cat /usr/local/{zk1,zk2}/conf/zoo.cfg 

dataDir=/usr/local/{zk1,zk2}/data/

clientPort=2181

maxClientCnxns=100

tickTime=2000

initLimit=10

syncLimit=5

server.1=192.168.10.18:2888:3888

server.2=192.168.10.10:2888:3888     server.N 的 N 须与各节点 myid 一致


rsync -avz  /usr/local/zk1/   root@192.168.10.10:/usr/local/zk2  

echo 2 > /usr/local/zk2/data/myid                         

sed -i.bak 's/zk1/zk2/g' /usr/local/zk2/conf/zoo.cfg     

bash  /usr/local/{zk1,zk2}/bin/zkServer.sh start

Starting zookeeper ... STARTED    启动服务   hadoop_master/slave 操作


配置hbase_master

tar zxvf hbase-2.2.5-bin.tar.gz &&    mv hbase-2.2.5 /usr/local/hbase_master

mkdir -pv /usr/local/hbase_master/logs


cd /usr/local/hbase_master/conf/

cp -pv  hbase-env.sh  hbase-env.sh.bak

cat hbase-env.sh

export JAVA_HOME=/usr/local/java/

export HBASE_MANAGES_ZK=false


cp -pv hbase-site.xml  hbase-site.xml.bak

cat  hbase-site.xml

<configuration>

<property>

   <name>hbase.rootdir</name>

   <value>hdfs://master:9000/hbase</value>

</property>

<property>

    <name>hbase.cluster.distributed</name>

    <value>true</value>

</property>

</configuration>


cp -pv regionservers regionservers.bak

echo master > regionservers  &&   echo node  >> regionservers


配置hbase_slave

tar zxvf hbase-2.2.5-bin.tar.gz &&    mv hbase-2.2.5 /usr/local/hbase_node1

mkdir -pv /usr/local/hbase_node1/logs


cd /usr/local/hbase_node1/conf/

cp -pv  hbase-env.sh  hbase-env.sh.bak

cat hbase-env.sh

export JAVA_HOME=/usr/local/java/

export HBASE_MANAGES_ZK=false


cp -pv hbase-site.xml  hbase-site.xml.bak

cat  hbase-site.xml

<configuration>

<property>

   <name>hbase.rootdir</name>

   <value>hdfs://master:9000/hbase</value>    hbase.rootdir 必须指向 HDFS NameNode(master)

</property>

<property>

    <name>hbase.cluster.distributed</name>

    <value>true</value>

</property>

</configuration>


cp -pv regionservers regionservers.bak

echo master > regionservers  &&   echo node  >> regionservers


bash /usr/local/{hbase_master,hbase_node1}/bin/start-hbase.sh  &

启动服务 hbase_master/node操作


netstat -tuplna | grep LISTEN

tcp        0      0 0.0.0.0:50070          0.0.0.0:*       LISTEN      2252/java    

tcp        0      0 ::ffff:192.168.10.18:8088   :::*      LISTEN      7803/java

tcp        0      0 :::2181                     :::*              LISTEN      4170/java

tcp        0      0 192.168.10.18:9000   0.0.0.0:*     LISTEN      11806/java

tcp        0      0 ::ffff:192.168.10.18:16010  ::ffff:192.168.10.12:50746  FIN_WAIT2   3965/java

   

http://serverip:50070/dfshealth.html#tab-overview

hadoop+hbase+zookeeper分布式集群_应用程序

http://serverip:8088/

hadoop+hbase+zookeeper分布式集群_应用程序_02

http://serverip:16010/master-status

hadoop+hbase+zookeeper分布式集群_应用程序_03