1. Deployment Prerequisites

Operating user: hadoop

Working directory: /home/hadoop/apps

Machines (3): hadoop1, hadoop2, hadoop3

All three machines run CentOS 7, with JDK 1.8 installed and a ZooKeeper 3.5.7 cluster already up and running.

The steps below create the user, configure passwordless SSH login, disable the firewall, and so on:

# Run the following configuration on all three machines
# Create the hadoop user
useradd hadoop
passwd hadoop
New password:
Retype new password:

Grant sudo privileges: below the root entry, add a line for the hadoop user.
# Temporarily relax permissions
chmod 777 /etc/sudoers
vim /etc/sudoers
## Allow root to run any commands anywhere
root ALL=(ALL) ALL
hadoop ALL=(ALL) NOPASSWD:ALL
# Restore permissions
chmod 440 /etc/sudoers
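
If you would rather not loosen permissions on /etc/sudoers itself, a drop-in file under /etc/sudoers.d achieves the same result; a minimal sketch, run as root:

# Grant hadoop passwordless sudo via a drop-in file instead of editing /etc/sudoers
echo 'hadoop ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/hadoop
chmod 440 /etc/sudoers.d/hadoop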

# Configure passwordless SSH login
# Switch to the hadoop user's home directory
su - hadoop
ssh-keygen -t rsa   # press Enter four times to accept the defaults
# This generates two files: id_rsa (private key) and id_rsa.pub (public key)
# Copy the public key to every machine that should accept passwordless login,
# including this machine itself (start-dfs.sh also logs in to the local node)
ssh-copy-id hadoop1
ssh-copy-id hadoop2
ssh-copy-id hadoop3
# Repeat the keygen and ssh-copy-id steps on hadoop2 as well: it runs the second
# NameNode and needs SSH access to hadoop1 for the sshfence method in section 6.3
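
To verify that passwordless login works before moving on, a loop like the one below should print each hostname without asking for a password:

# No password prompt should appear for any of the three hosts
for h in hadoop1 hadoop2 hadoop3; do ssh hadoop@$h hostname; done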

# Map hostnames to IP addresses
sudo vim /etc/hosts
192.168.62.161 hadoop1
192.168.62.162 hadoop2
192.168.62.163 hadoop3
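
A quick way to confirm that the mappings resolve on each machine:

# Each line should echo the IP address and hostname from /etc/hosts
for h in hadoop1 hadoop2 hadoop3; do getent hosts $h; done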

# Disable the firewall
sudo systemctl stop firewalld
sudo systemctl disable firewalld

# Disable SELinux
sudo setenforce 0   # takes effect immediately, lasts until reboot
sudo vim /etc/selinux/config
SELINUX=enforcing --> SELINUX=disabled   # makes the change permanent

# Configure time synchronization
# Install the ntpdate tool
sudo yum -y install ntp ntpdate
# Sync the system time with a network time server (requires root)
sudo ntpdate 0.asia.pool.ntp.org
# Write the system time to the hardware clock
sudo hwclock --systohc
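
ntpdate performs a one-off sync; to keep the clocks aligned afterwards, one option is to leave the ntpd service (installed above) running:

# Keep time synchronized continuously; note ntpdate cannot run while ntpd holds the NTP port
sudo systemctl enable ntpd
sudo systemctl start ntpd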


2. Cluster Node Plan

Node roles (these match the jps output in section 9.1):

hadoop1: NameNode, DataNode, JournalNode, DFSZKFailoverController, ResourceManager, NodeManager, ZooKeeper
hadoop2: NameNode, DataNode, JournalNode, DFSZKFailoverController, ResourceManager, NodeManager, ZooKeeper
hadoop3: DataNode, JournalNode, NodeManager, ZooKeeper

In this cluster the HDFS NameNode is HA and the YARN ResourceManager is HA as well, which keeps the Hadoop cluster highly available.


3. Download

Run on the hadoop1 machine:

wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.3/hadoop-3.1.3.tar.gz


4. Extract

tar -zxvf hadoop-3.1.3.tar.gz


5. Configure Environment Variables

# Add the environment variables
sudo vim /etc/profile
export HADOOP_HOME=/home/hadoop/apps/hadoop-3.1.3
export PATH=$PATH:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin

# Reload the configuration
source /etc/profile
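
To confirm the variables took effect, check that the hadoop binary is now on the PATH:

# Should print "Hadoop 3.1.3" plus build details
hadoop version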


6. Modify the Configuration Files

# Directory containing the configuration files
cd /home/hadoop/apps/hadoop-3.1.3/etc/hadoop


6.1. Edit hadoop-env.sh

vim hadoop-env.sh
# Set Hadoop-specific environment variables here.
# Point JAVA_HOME at the JDK installation
export JAVA_HOME=/opt/jdk1.8.0_212
# Specify the user that runs each daemon; required since Hadoop 3.x (my username is simply hadoop)
export HDFS_NAMENODE_USER=hadoop
export HDFS_DATANODE_USER=hadoop
export HDFS_ZKFC_USER=hadoop
export HDFS_JOURNALNODE_USER=hadoop
export YARN_RESOURCEMANAGER_USER=hadoop
export YARN_NODEMANAGER_USER=hadoop


6.2. Edit core-site.xml

vim core-site.xml 
<configuration>
<!-- Default filesystem: the HA nameservice name -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- Temporary directory: create it in advance -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/data/hadoop_tmp_data</value>
</property>
<!-- Static user for the web UI -->
<property>
<name>hadoop.http.staticuser.user</name>
<value>hadoop</value>
</property>
<!-- Address of the ZooKeeper quorum that HA depends on -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
</property>
<!-- Proxy-user settings for connecting to HiveServer2 (optional) -->
<property>
<name>hadoop.proxyuser.hadoop.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.groups</name>
<value>*</value>
</property>
</configuration>
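
As the comment above says, hadoop.tmp.dir must exist before the cluster starts. With the passwordless SSH from section 1 in place, one way to create it on all three machines at once:

# Create the temporary directory on every node ahead of time
for h in hadoop1 hadoop2 hadoop3; do ssh hadoop@$h 'mkdir -p /home/hadoop/data/hadoop_tmp_data'; done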


6.3. Edit hdfs-site.xml

vim hdfs-site.xml
<configuration>
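<!-- Logical name of the HDFS nameservice -->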
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- IDs of the NameNodes within the nameservice -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- Hostname and RPC port of each NameNode -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>hadoop1:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>hadoop2:8020</value>
</property>
<!-- Hostname and HTTP port of each NameNode -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>hadoop1:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>hadoop2:9870</value>
</property>
<!-- URL for the shared edits on the JournalNode quorum -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485/ns1</value>
</property>
<!-- Proxy provider the HDFS client uses to find the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>

<!-- Fencing methods applied when the two NameNodes switch state -->
<!-- Multiple methods are separated by newlines inside a single value, one method per line -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence
shell(/bin/true)</value>
</property>

<!-- Enable automatic NameNode failover for the HA HDFS cluster -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>

<!-- Private key used by the sshfence fencing method -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>

<!-- Directory where the JournalNodes store edits: create it in advance -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/hadoop/data/journalnode_data</value>
</property>
</configuration>
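
Likewise, the JournalNode edits directory configured above must be created in advance on all three machines:

# Create the JournalNode data directory on every node
for h in hadoop1 hadoop2 hadoop3; do ssh hadoop@$h 'mkdir -p /home/hadoop/data/journalnode_data'; done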


6.4. Edit yarn-site.xml

vim yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
<description>Enable RM high-availability</description>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yarn_cluster1</value>
<description>Name of the cluster</description>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
<description>The list of RM nodes in the cluster when HA is enabled</description>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop1</value>
<description>The hostname of the rm1</description>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop2</value>
<description>The hostname of the rm2</description>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>hadoop1:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>hadoop2:8088</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper cluster -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- Enable automatic recovery -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
<value>true</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- Environment variables that containers inherit -->
<property>
<name>yarn.nodemanager.env-whitelist</name>
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>

<!-- Virtual memory check -->
<!-- If containers fail to start because the virtual memory limit is exceeded, add the following -->
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
<description>Whether virtual memory limits will be enforced for containers</description>
</property>
<property>
<name>yarn.nodemanager.vmem-pmem-ratio</name>
<value>6</value>
<description>Ratio between virtual memory to physical memory when setting memory limits for containers</description>
</property>
</configuration>


6.5. Edit the workers File

vim workers

hadoop1
hadoop2
hadoop3

6.6. Edit mapred-site.xml

vim mapred-site.xml

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
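
Once the cluster is up (section 8), the examples jar that ships with Hadoop gives a quick smoke test of MapReduce on YARN:

# Estimate pi with 2 map tasks of 10 samples each; succeeds only if YARN and the shuffle handler are wired up
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar pi 2 10

If the job fails complaining that it cannot find MRAppMaster, mapred-site.xml typically also needs HADOOP_MAPRED_HOME set for the AM, map, and reduce environments (see the Hadoop 3.x documentation).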

7. Distribute the Directory

# Copy the configured hadoop-3.1.3 directory to the hadoop2 and hadoop3 machines
scp -r /home/hadoop/apps/hadoop-3.1.3 hadoop2:/home/hadoop/apps/
scp -r /home/hadoop/apps/hadoop-3.1.3 hadoop3:/home/hadoop/apps/

On hadoop2 and hadoop3, repeat step 5, i.e. configure the environment variables.


8. Start the Cluster

# Start the JournalNodes: run on each of hadoop1, hadoop2 and hadoop3
hdfs --daemon start journalnode

# Format the NameNode: run on hadoop1 or hadoop2; I ran it on hadoop1
hdfs namenode -format

# Start the NameNode: run on hadoop1
hdfs --daemon start namenode

# Sync the NameNode metadata and then start it: run on hadoop2
hdfs namenode -bootstrapStandby
hdfs --daemon start namenode


# Format the HA state in ZooKeeper: run on hadoop1
hdfs zkfc -formatZK

# The DataNodes and ZKFCs are not running yet; bring the full HDFS service up with start-dfs.sh.
# The YARN service can then be started with: start-yarn.sh

# Restart HDFS
stop-dfs.sh
start-dfs.sh

# Restart YARN
stop-yarn.sh
start-yarn.sh

# Restart both HDFS and YARN at once
stop-all.sh
start-all.sh
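
After everything is up, the HA admin tools show which node holds each active role:

# One NameNode should report active and the other standby
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
# Likewise for the two ResourceManagers
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2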


9. Check Service Status

9.1. Check Processes

I wrote a jpsall.sh script to check the daemons on all three machines at once; you can also run the jps command on each machine separately.

cat jpsall.sh

#!/bin/bash
# Run jps to check the daemons on every server

for i in hadoop1 hadoop2 hadoop3
do
echo "===== services on ${i} ====="
ssh hadoop@$i '/opt/jdk1.8.0_212/bin/jps'
done

sh jpsall.sh


===== services on hadoop1 =====
11616 JournalNode
11041 QuorumPeerMain
11797 DFSZKFailoverController
12296 NodeManager
12169 ResourceManager
11386 DataNode
11261 NameNode
29645 Jps
===== services on hadoop2 =====
50000 NameNode
50196 JournalNode
50471 ResourceManager
50296 DFSZKFailoverController
49897 QuorumPeerMain
50553 NodeManager
50076 DataNode
57758 Jps
===== services on hadoop3 =====
25748 JournalNode
25546 QuorumPeerMain
25643 DataNode
25867 NodeManager
29083 Jps

9.2. Check the Web UIs

HDFS web UI: http://hadoop1:9870



YARN web UI: http://hadoop1:8088
