--环境信息:

namenodemaster.hadoop

公网:122.225.77.45

内网:192.168.1.45

yarnserver.hadoop

公网:122.225.77.46

内网:192.168.1.46

slave001.hadoop

公网:122.225.77.47

内网:192.168.1.47

slave002.hadoop

公网:122.225.77.48

内网:192.168.1.48

 

--安装所需的包:

ambari-2.7.4.0-centos7.tar.gz
HDP-3.1.4.0-centos7-rpm.tar.gz
HDP-GPL-3.1.4.0-centos7-gpl.tar.gz
HDP-UTILS-1.1.0.22-centos7.tar.gz
jdk-8u211-linux-x64.tar.gz
mysql57-community-release-el7-10.noarch.rpm

下载:

链接:https://pan.baidu.com/s/1TIk73wgNiRSO2OixEL5w_g 提取码:ke4e

下载链接:

HDP:http://public-repo-1.hortonworks.com/HDP/centos7/3.x/updates/3.1.4.0/HDP-3.1.4.0-centos7-rpm.tar.gz
HDP-UTILS:http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos7/HDP-UTILS-1.1.0.22-centos7.tar.gz
HDP-GPL:http://public-repo-1.hortonworks.com/HDP-GPL/centos7/3.x/updates/3.1.4.0/HDP-GPL-3.1.4.0-centos7-gpl.tar.gz
Ambari 2.7.4:http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.7.4.0/ambari-2.7.4.0-centos7.tar.gz

 

提前准备:

1、四台服务器关闭防火墙、selinux

2、配置java环境

参考:

3、设置NTP

4、设置秘钥登录

参考:

5、修改四台服务器的hosts

6、修改yum源

7、安装数据库

参考:

配置NTP

以namenodemaster.hadoop为ntp校准服务器:
下载ntp

yum -y install ntp
依次输入以下命令
systemctl is-enabled ntpd
systemctl enable ntpd
systemctl start ntpd

master服务器配置
[root@namenodemaster opt]# vim /etc/ntp.conf 
driftfile /var/lib/ntp/drift
restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
server 192.168.1.45
server 127.127.1.0
fudge  127.127.1.0  stratum  10
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys
disable monitor

其他节点配置如下
[root@yarnserver opt]# vim /etc/ntp.conf
driftfile /var/lib/ntp/drift
restrict default nomodify notrap nopeer noquery
restrict 127.0.0.1
restrict ::1
server namenodemaster.hadoop
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys
disable monitor

手动触发同步

[root@slave005 ~]# ntpdate namenodemaster

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop

 

 检查是否成功用“ntpstat”命令查看同步状态,出现以下状态代表启动成功

[root@namenodemaster opt]# ntpstat

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_02

 

 如果是这个状态,就需要等很长时间,但是配置是对的

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_03

 

 

设置秘钥登录

需要注意

服务器端的 authorized_keys这个只能是600权限

在namenodemaster.hadoop上需要运行

cat id_rsa.pub >> authorized_keys

最终的结果是namenodemaster.hadoop、yarnserver.hadoop能相互使用秘钥登录,同时都能秘钥登录slave01   slave02

 修改hosts

在四台服务器上分别运行

echo -e "192.168.1.45 namenodemaster.hadoop namenodemaster \n192.168.1.46 yarnserver.hadoop yarnserver \n192.168.1.47 slave001.hadoop slave001 \n192.168.1.48 slave002.hadoop slave002" >> /etc/hosts

主机名一定要配置正确。如何验证主机名(FQDN)配置是否OK,

输入命令hostname -f进行查看

 

修改yum源

四台服务器分别

1、备份本地yum源
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak
2.获取阿里yum源配置文件

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum makecache

创建数据库

创建库ambari、hive、oozie、ranger、rangerkms

CREATE DATABASE ambari CHARACTER SET utf8 COLLATE utf8_general_ci; 
use ambari; 
CREATE USER 'ambari'@'%' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'%'; 
CREATE USER 'ambari'@'localhost' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'localhost'; 
CREATE USER 'ambari'@'master' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'master'; 
FLUSH PRIVILEGES; 
source /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql 
show tables; 
use mysql; 
select Host, User, authentication_string from user where user='ambari';

CREATE DATABASE hive CHARACTER SET utf8 COLLATE utf8_general_ci; 
use hive; 
CREATE USER 'hive'@'%' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'hive'@'%'; 
CREATE USER 'hive'@'localhost' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'hive'@'localhost'; 
CREATE USER 'hive'@'master' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'hive'@'master'; 
FLUSH PRIVILEGES; 

CREATE DATABASE oozie CHARACTER SET utf8 COLLATE utf8_general_ci; 
use oozie; 
CREATE USER 'oozie'@'%' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'oozie'@'%'; 
CREATE USER 'oozie'@'localhost' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'oozie'@'localhost'; 
CREATE USER 'oozie'@'master' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'oozie'@'master'; 
FLUSH PRIVILEGES;

CREATE DATABASE ranger CHARACTER SET utf8 COLLATE utf8_general_ci; 
use ranger; 
CREATE USER 'rangeradmin'@'%' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangeradmin'@'%'; 
CREATE USER 'rangeradmin'@'localhost' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangeradmin'@'localhost'; 
CREATE USER 'rangeradmin'@'master' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangeradmin'@'master'; 
FLUSH PRIVILEGES;

CREATE DATABASE rangerkms CHARACTER SET utf8 COLLATE utf8_general_ci; 
use rangerkms; 
CREATE USER 'rangerkms'@'%' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangerkms'@'%'; 
CREATE USER 'rangerkms'@'localhost' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangerkms'@'localhost'; 
CREATE USER 'rangerkms'@'master' IDENTIFIED BY '123456ABcd'; 
GRANT ALL PRIVILEGES ON *.* TO 'rangerkms'@'master'; 
FLUSH PRIVILEGES;

配置mysql驱动

[root@master yum.repos.d]# yum install mysql-connector-java -y
[root@master yum.repos.d]# cd /usr/share/java
[root@master yum.repos.d]# chmod 644 mysql-connector-java.jar
[root@master yum.repos.d]# cp /usr/share/java/mysql-connector-java.jar /var/lib/ambari-server/resources/mysql-jdbc-driver.jar
[root@master yum.repos.d]# vim /etc/ambari-server/conf/ambari.properties
添加server.jdbc.driver.path=/usr/share/java/mysql-connector-java.jar
ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar

离线安装ambari

1、安装httpd服务 - 主服务器

[root@namenodemaster opt]# yum -y install httpd
[root@namenodemaster opt]# systemctl start httpd
[root@namenodemaster opt]# systemctl enable httpd



2、上传ambari-2.7.4.0-centos7.tar.gz   HDP-UTILS-1.1.0.22-centos7.tar.gz   HDP-3.1.4.0-centos7-rpm.tar.gz  放到/var/www/html目录下 - 主服务器
安装完成后,会生成 /var/www/html目录(相当于Tomcat的webapps目录),进入到/var/www/html目录下,
创建ambari和hdp目录,用来存放安装文件.

[root@yum ~]# mkdir /var/www/html/ambari
[root@yum ~]# mkdir /var/www/html/hdp
[root@yum ~]# mkdir /var/www/html/hdp/HDP-UTILS-1.1.0.22
[root@yum ~]# tar -zxvf ambari-2.7.4.0-centos7.tar.gz -C /var/www/html/ambari/
[root@yum ~]# tar -zxvf HDP-3.1.4.0-centos7-rpm.tar.gz -C /var/www/html/hdp/
[root@yum ~]# tar -zxvf HDP-UTILS-1.1.0.22-centos7.tar.gz -C /var/www/html/hdp/HDP-UTILS-1.1.0.22/

3、现在可以通过访问

http://122.225.77.45/ambari/
http://122.225.77.45/hdp/

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_04

 

 

4、制作本地源-主服务器

cd /etc/yum.repos.d/

vi -b  ambari.repo

#VERSION_NUMBER=2.7.4.0-118
[ambari-2.7.4.0]
#json.url = http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json
name=ambari Version - ambari-2.7.4.0
baseurl=http://122.225.77.45/ambari/ambari/centos7/2.7.4.0-118/
gpgcheck=1
gpgkey=http://122.225.77.45/ambari/ambari/centos7/2.7.4.0-118/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins
enabled=1
priority=1

vi -b  hdp.repo

#VERSION_NUMBER=3.1.4.0-315
[HDP-3.1.4.0]
name=HDP Version - HDP-3.1.4.0
baseurl=http://122.225.77.45/hdp/HDP/centos7/3.1.4.0-315/
gpgcheck=1
gpgkey=http://122.225.77.45/ambari/ambari/centos7/2.7.4.0-118/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins
enabled=1
priority=1

vi -b  hdp-utils.repo

[HDP-UTILS-1.1.0.22]
name=HDP-UTILS Version - HDP-UTILS-1.1.0.22
baseurl=http://122.225.77.45/hdp/HDP-UTILS-1.1.0.22/HDP-UTILS/centos7/1.1.0.22/
gpgcheck=1
gpgkey=http://122.225.77.45/hdp/HDP-UTILS-1.1.0.22/HDP-UTILS/centos7/1.1.0.22/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins
enabled=1
priority=1

 vi -b hdp.gpl.repo

#VERSION_NUMBER=3.1.4.0-315
[HDP-3.1-GPL-repo-1]
name=HDP-GPL Version - HDP-GPL-3.1.4.0
baseurl=http://122.225.77.45/hdp/HDP-GPL/centos7/3.1.4.0-315/
gpgcheck=1
gpgkey=http://122.225.77.45/hdp/HDP-GPL/centos7/3.1.4.0-315/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins
enabled=1
priority=1

 

清理一下yum缓存

[root@master ambari]# yum clean all
[root@master ambari]# yum makecache
[root@master ambari]# yum repolist



将创建好的文件拷贝到其他服务器

scp -P 10022 ambari.repo hdp.repo hdp-utils.repo hdp.gpl.repo root@yarnserver.hadoop:$PWD
scp -P 10022 ambari.repo hdp.repo hdp-utils.repo hdp.gpl.repo root@slave001.hadoop:$PWD
scp -P 10022 ambari.repo hdp.repo hdp-utils.repo hdp.gpl.repo root@slave002.hadoop:$PWD

初始化ambari-server

[root@master ~]# ambari-server setup
(1) 提示是否自定义设置。输入:y
Customize user account for ambari-server daemon [y/n] (n)? y
(2)ambari-server 账号。
Enter user account for ambari-server daemon (root):
如果直接回车就是默认选择root用户
如果输入已经创建的用户就会显示:
Enter user account for ambari-server daemon (root):ambari
Adjusting ambari-server permissions and ownership...
(3)检查防火墙是否关闭
Adjusting ambari-server permissions and ownership...
Checking firewall...
WARNING: iptables is running. Confirm the necessary Ambari ports are accessible. Refer to the Ambari documentation for more details on ports.
OK to continue [y/n] (y)?
直接回车
(4)设置JDK。输入:3
Checking JDK...
Do you want to change Oracle JDK [y/n] (n)? y
[1] Oracle JDK 1.8 + Java Cryptography Extension (JCE) Policy Files 8
[2] Oracle JDK 1.7 + Java Cryptography Extension (JCE) Policy Files 7
[3] Custom JDK
==============================================================================
Enter choice (1): 3
如果上面选择3自定义JDK,则需要设置JAVA_HOME。输入:/usr/local/java/
WARNING: JDK must be installed on all hosts and JAVA_HOME must be valid on all hosts.
WARNING: JCE Policy files are required for configuring Kerberos security. If you plan to use Kerberos,please make sure JCE Unlimited Strength Jurisdiction Policy Files are valid on all hosts.
Path to JAVA_HOME: /usr/java/jdk1.8.0_131
Validating JDK on Ambari Server...done.
Completing setup...
(5)数据库配置。选择:y
Configuring database...
Enter advanced database configuration [y/n] (n)? y
(6)选择数据库类型。输入:3
Configuring database...
==============================================================================
Choose one of the following options:
[1] - PostgreSQL (Embedded)
[2] - Oracle
[3] - MySQL
[4] - PostgreSQL
[5] - Microsoft SQL Server (Tech Preview)
[6] - SQL Anywhere
==============================================================================
Enter choice (3): 3
(7)设置数据库的具体配置信息,根据实际情况输入,如果和括号内相同,则可以直接回车。如果想重命名,就输入。
Hostname (localhost): namenodemaster.hadoop
Port (3306): 3306
Database name (ambari): ambari
Username (ambari): ambari
Enter Database Password (bigdata):ambari123
Re-Enter password: ambari123
(8)将Ambari数据库脚本导入到数据库
WARNING: Before starting Ambari Server, you must run the following DDL against the database to create the schema: /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql 
Proceed with configuring remote database connection properties [y/n] (y)? y
[root@master ~]# ambari-server start

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_05

 

 错误处理:

如果出现错误,请注意查看日志,根据具体的错误内容进行处理,默认ambari-server的日志在/var/log/ambari-server/ambari-server.log里面。如果在处理日志的过程中或者后面安装的过程中出现一些莫名的错误,可以重置的安装。
手动将mysql里面创建的数据库进行删除,使用下面的代码重置ambari-server。

手动删除数据库

[root@master ~]# mysql -uroot -p
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| ambari |
| hive |
| oozie |
| performance_schema |
+--------------------+
rows in set (0.00 sec)
mysql> drop database ambari;
mysql> drop database hive;
mysql> drop database oozie;

重置ambari-server
[root@master ~]# ambari-server stop
[root@master ~]# ambari-server reset
[root@master ~]# ambari-server setup



安装部署HDP集群

登录ambari界面

http://namenodemaster.hadoop:8080

账户密码默认 admin

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_06

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_07

 

 

选择版本,加入本地源

http://122.225.77.45/hdp/HDP/centos7/3.1.4.0-315/
http://122.225.77.45/hdp/HDP-GPL/centos7/3.1.4.0-315/
http://122.225.77.45/hdp/HDP-UTILS-1.1.0.22/HDP-UTILS/centos7/1.1.0.22/

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_08

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_09

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_10

 

 检查潜在的问题,可以通过这个查看有什么问题

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_11

 

 

选择安装组件

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_12

 

 

勾选DataNode、NameNode、Client

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_13

 

 

配置安全密码

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_14

 

 

红色的都要设置,原有的不用修改

提示驱动问题的话
[root@namenodemaster java] ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar
[root@namenodemaster java] ambari-server restart

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_15

 

 

完成集群部署

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_16

 

 安装组件的常见报错

问题

解决

failed connect to 192.168.10.151:80; Connection refused

重启http服务

systemctl start httpd.service

 

Applying File['/usr/hdp/current/slider-client/lib/slider.tar.gz'] failed, parent directory /usr/hdp/current/slider-client/lib doesn't exist

创建路径即可

fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))

resource_management.core.exceptions.Fail: Cannot match package for regexp name zookeeper_${stack_version}. Available packages

 

ln -s /usr/hdp/3.1.4.0-315/slider /usr/hdp/current/slider-client

resource_management.core.exceptions.ExecutionFailed: Execution of 'ambari-python-wrap /usr/bin/hdp-select set oozie-client 3.1.4.0-315' returned 1. symlink target /usr/hdp/current/oozie-client for oozie already exists and it is not a symlink.

从其他机器上拷贝

/usr/hdp/3.1.4.0-315/oozie

#ln -s /usr/hdp/3.1.4.0-315/oozie /usr/hdp/current/oozie-client

# /usr/bin/hdp-select set oozie-client 3.1.4.0-315

 

resource_management.core.exceptions.Fail: Cannot create directory '/usr/hdp/current/slider-client/conf' as '/usr/hdp/current/slider-client' is a broken symlink

mkdir /usr/hdp/3.1.4.0-315/slider/conf

mkdir /usr/hdp/3.1.4.0-315/slider/lib

 

 line 321: install-activity-analyzer.sh: command not found

yum remove smartsense-hst

rm -rf /var/log/smartsense/

 

HDP 2.6.1 Infra Solr Client Install fails "Source file /usr/lib/ambari-infra-solr-client/solrCloudCli.sh is not found"

Query for ambari rpm:

# rpm -qa | grep ambari

Removed ambari-infra-solr-client

yum remove ambari-infra-solr-client

 

oozie web console is disabled.

安装extjs2.2.zip

Web UI failing with HTTP 503 service unavailable

安装je.jar 

参考:

https://community.hortonworks.com/questions/77600/faclon-web-ui-failing-with-http-503-service-unavai.html

 

 

 exception: java.lang.ClassNotFoundException: org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter

yum reinstall ambari-metrics-hadoop-sink

参考:

https://community.hortonworks.com/questions/135736/getting-exception-javalangclassnotfoundexception-o.html

https://community.hortonworks.com/content/supportkb/49117/storm-worker-fails-with-javalangclassnotfoundexcep.html

there are unfinished transactions remaining. You might consider running yum-complete-transaction first to finish them。

### yum 清除未完成的

1、安装 yum-complete-transaction

这是一个能发现未完成或被中断的yum事务的程序。

#yum -y install yum-utils  

2、清除yum缓存

#yum clean all

3、执行清理未完成事务

#yum-complete-transaction --cleanup-only

参考:https://yq.aliyun.com/ziliao/65810

 

 

当集群突然断电之后,重启怎么办?
解决办法: 
1、启动ntpd(时间同步)服务 
有的系统管理员在测试集群上没有让ntpd开机启动,就要手动启动了,一般情况下,生产集群上是肯定开启的。 
service ntpd start

2、重启ambari服务和ambari代理 
ambari-server restart(主节点) 
ambari-agent restart(各个节点都要重启)

3、通过图形界面启动和管理各个服务 
通过浏览器访问一下地址http://主机ip:8080进行管理即可

 

HDFS  UI 无法打开

其实问题很简单,因为在CDH中hdfs-site.xml配置文件中WEB UI配置的是域名,而域名在Hosts又被解析成内网IP。

你使用netstat -apn | grep 50070看一下监听情况。

你会发现监听的是你的内网IP,而不是外网IP,所以HDFS的WEB UI就无法访问

需要更改50070绑定0.0.0.0

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_17

 

 

更改config配置

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_18

 

 

Oozie   Web Console Is Disabled问题解决方法

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_19

1、登录至主服务器

2、手动下载extjs-2.2-1.noarch.rpm库,并安装

wget http://public-repo-1.hortonworks.com/HDP-UTILS-GPL-1.1.0.22/repos/centos7-ppc/extjs/extjs-2.2-1.noarch.rpm

rpm -ivh extjs-2.2-1.noarch.rpm

3、删掉这个/usr/hdp/current/oozie-server/.prepare_war_cmd

rm /usr/hdp/current/oozie-server/.prepare_war_cmd

4、重启Oozie服务

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_20

重新登录

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_21

 

 

新增datenode服务器

1、修改主机名

 

主机名slave005.hadoop

 

2、修改yum源

 

(1)备份本地yum源

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak

(2)获取阿里yum源配置文件

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

yum makecache

3、修改hosts

 

slave005

echo -e "192.168.1.45 namenodemaster.hadoop  namenodemaster \n192.168.1.46 yarnserver.hadoop yarnserver \n192.168.1.47 slave001.hadoop slave001 \n192.168.1.48 slave002.hadoop slave002\n192.168.1.44 slave003.hadoop slave003\n192.168.1.51 slave004.hadoop slave004" >> /etc/hosts

 

剩余各个节点

echo -e "192.168.1.211 slave005.hadoop  slave005 " >> /etc/hosts

4、密钥登陆(master能秘钥登录新节点)


在新节点

mkdir -p /root/.ssh/

 

在namenodemaster.hadoop上运行

cd /root/.ssh/

ssh-copy-id  -p 10022  -i  /root/.ssh/id_rsa.pub  root@slave005.hadoop

 

在新节点

cat id_rsa.pub >> authorized_keys

5、java

6、拷贝服务器上面的本地源配置文件到新增主机的/etc/yum.repos.d/ 目录

master服务器

cd /etc/yum.repos.d/
scp -P 10022 ambari.repo hdp.repo hdp-utils.repo hdp.gpl.repo root@slave005.hadoop:$PWD

新节点
清理一下yum缓存
yum clean all && yum makecache
yum repolist

 

7、打开安全限制

 

vi -b /etc/security/limits.conf

 

文件末尾新增如下:

 

# End of file

* soft nofile 65536

* hard nofile 65536

* soft nproc 131072

* hard nproc 131072

 

8、安装NTP

yum -y install ntp

vim /etc/ntp.conf

systemctl start ntpd.service

systemctl enable ntpd.service

 

cat /etc/ntp.conf

driftfile /var/lib/ntp/drift

restrict default nomodify notrap nopeer noquery

restrict 127.0.0.1

restrict ::1

server namenodemaster.hadoop

includefile /etc/ntp/crypto/pw

keys /etc/ntp/keys

disable monitor

 

9、接入集群

 

创建/var/lib/ambari-agent/data目录

mkdir -p /var/lib/ambari-agent/data

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_22

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_23

 

 

 

默认的数据存放路径是  /usr/hdp

磁盘空间一定要够,不然安装报错

oozie报错,可以从报错很明显看到失败原因

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_24

 

 10、DataNode节点磁盘均衡(Rebalancing HDFS)

① 在 Ambari Web 中,浏览到 Services > HDFS > Summary

② 单击 Service Actions, 然后单击 Rebalance HDFS.

③ 输入 Balance Threshold 值作为磁盘容量到百分比

④ 单击 Start

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_25

 

 

可以通过打开 Background Operations 窗口监控或取消重均衡进程。

 

删除节点

通过ambari平台操作

(1)设置slave001节点设置DataNode配置为Decommissioned,排空数据

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_26

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_27

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_centos_28

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_29

 

 

 

(2)数据排空后删除节点

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_30

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_Ambari 大数据安装 spark_31

Ambari 大数据安装 spark ambari安装hadoop(centos7)_hadoop_32

 

 

Ambari 大数据安装 spark ambari安装hadoop(centos7)_ambari_33