Table of Contents

  • Starting and Stopping Hadoop Services
  • 1. Starting and stopping all services at once (not recommended)
  • 1. Command usage
  • 2. The start-all.sh script
  • 3. stop-all.sh
  • 2. Starting all Hadoop services ★★★
  • 1. Command usage
  • 2. start-dfs.sh
  • 3. start-yarn.sh
  • 3. Stopping all Hadoop services ★★★
  • 1. Command usage
  • 2. stop-dfs.sh
  • 3. stop-yarn.sh
  • 4. Viewing Hadoop status in a browser
  • 1. Accessing the HDFS web UI
  • 2. Accessing the history server (historyserver)
  • 3. Application monitoring


Starting and Stopping Hadoop Services

1. Starting and stopping all services at once (not recommended)

1. Command usage

# Start all services
start-all.sh
# Stop all services
stop-all.sh
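
Both wrappers live in Hadoop's sbin directory, so calling them by bare name assumes that directory is on PATH. A minimal sketch of that setup, assuming a hypothetical install under /opt/hadoop:

# Hypothetical install location -- adjust to your environment
export HADOOP_HOME=/opt/hadoop
export PATH="${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin"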

2. The start-all.sh script

# Define the abort handler
function hadoop_abort_startall()
{
  exit 1
}

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  this="${BASH_SOURCE-$0}"
  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# Check that hadoop-config.sh is present and source it
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
  exit 1
fi

if ! hadoop_privilege_check; then
  trap hadoop_abort_startall INT
  hadoop_error "WARNING: Attempting to start all Apache Hadoop daemons as ${USER} in 10 seconds."
  hadoop_error "WARNING: This is not a recommended production deployment configuration."
  hadoop_error "WARNING: Use CTRL-C to abort."
  sleep 10
  trap - INT
fi

# Call start-dfs.sh to start the HDFS daemons
if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
  "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
fi

# Call start-yarn.sh to start the YARN daemons
if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
  "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" --config "${HADOOP_CONF_DIR}"
fi
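
When the caller is not privileged, start-all.sh does not refuse to run; it installs a temporary INT trap, warns, sleeps ten seconds so CTRL-C can abort, then removes the trap and proceeds. A distilled, standalone sketch of that abort-window idiom (not Hadoop code):

#!/usr/bin/env bash
# Minimal reproduction of the 10-second abort window used by start-all.sh
function abort_start() {
  echo "Aborted by user." >&2
  exit 1
}

trap abort_start INT        # CTRL-C now jumps to abort_start
echo "WARNING: continuing in 10 seconds, use CTRL-C to abort." >&2
sleep 10
trap - INT                  # restore default CTRL-C behavior
echo "Proceeding with startup."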

3. stop-all.sh

# Define the abort handler
function hadoop_abort_stopall()
{
  exit 1
}

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  this="${BASH_SOURCE-$0}"
  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
  exit 1
fi

if ! hadoop_privilege_check; then
  trap hadoop_abort_stopall INT
  hadoop_error "WARNING: Stopping all Apache Hadoop daemons as ${USER} in 10 seconds."
  hadoop_error "WARNING: Use CTRL-C to abort."
  sleep 10
  trap - INT
fi

# Call stop-dfs.sh to stop HDFS
if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
  "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"
fi

# Call stop-yarn.sh to stop YARN
if [[ -f "${HADOOP_YARN_HOME}/sbin/stop-yarn.sh" ]]; then
  "${HADOOP_YARN_HOME}/sbin/stop-yarn.sh" --config "${HADOOP_CONF_DIR}"
fi

2. Starting all Hadoop services ★★★

1. Command usage

# Start the HDFS daemons
start-dfs.sh
#------------------------------------------------------------------
Starting namenodes on [single01]
Last login: Wed Dec 29 17:17:39 CST 2021 from 192.168.50.1 on pts/0
Starting datanodes
Last login: Wed Dec 29 17:17:54 CST 2021 on pts/0
Starting secondary namenodes [single01]
Last login: Wed Dec 29 17:17:56 CST 2021 on pts/0
#------------------------------------------------------------------
# Start the YARN daemons
start-yarn.sh
#-------------------------------------------------
Starting resourcemanager
Last login: Wed Dec 29 17:17:59 CST 2021 on pts/0
Starting nodemanagers
Last login: Wed Dec 29 17:18:49 CST 2021 on pts/0
#------------------------------------------------
# Start the job history server
mapred --daemon start historyserver
# Verify that all daemons are running
jps
#--------------------
1936 NameNode
2770 NodeManager
3202 JobHistoryServer
3251 Jps
2618 ResourceManager
2110 DataNode
2334 SecondaryNameNode
#--------------------
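
Rather than eyeballing the jps listing, the check can be scripted. A small sketch, with the daemon list matching the single-node setup above:

#!/usr/bin/env bash
# Report any expected daemon that is missing from jps output
expected="NameNode DataNode SecondaryNameNode ResourceManager NodeManager JobHistoryServer"
running=$(jps)
for daemon in ${expected}; do
  if ! grep -qw "${daemon}" <<< "${running}"; then
    echo "MISSING: ${daemon}" >&2
  fi
done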

2. start-dfs.sh

#!/usr/bin/env bash
function hadoop_usage
{
  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
}

this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# Source hdfs-config.sh to load the configuration
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
  exit 1
fi

# Parse command-line options
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      nameStartOpt="$startOpt"
    ;;
    -rollback)
      dataStartOpt="$startOpt"
    ;;
    *)
      hadoop_exit_with_usage 1
    ;;
  esac
fi


# Append any remaining options for the namenode
nameStartOpt="$nameStartOpt $*"

#---------------------------------------------------------
# Determine the namenodes

NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

# Fall back to the local hostname
if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi

# Print status message
echo "Starting namenodes on [${NAMENODES}]"

# Start the NameNode daemon(s)
hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    --daemon start \
    namenode ${nameStartOpt}

HADOOP_JUMBO_RETCOUNTER=$?

#---------------------------------------------------------
# Print status message
echo "Starting datanodes"
# Start the DataNode daemons
hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --daemon start \
    datanode ${dataStartOpt}
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))

#---------------------------------------------------------
# Secondary namenodes (if any)

SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
# Start a SecondaryNameNode only when one is configured
if [[ -n "${SECONDARY_NAMENODES}" ]]; then

  if [[ "${NAMENODES}" =~ , ]]; then

    hadoop_error "WARNING: Highly available NameNode is configured."
    hadoop_error "WARNING: Skipping SecondaryNameNode."

  else

    if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
      SECONDARY_NAMENODES=$(hostname)
    fi
    # Print status message
    echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
    # Start the secondarynamenode daemon(s)
    hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
      --workers \
      --config "${HADOOP_CONF_DIR}" \
      --hostnames "${SECONDARY_NAMENODES}" \
      --daemon start \
      secondarynamenode
    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
  fi
fi

#---------------------------------------------------------
# Quorum journal nodes (if any)

JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)

if [[ "${#JOURNAL_NODES}" != 0 ]]; then
  echo "Starting journal nodes [${JOURNAL_NODES}]"
  # Start the journalnode daemons
  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${JOURNAL_NODES}" \
    --daemon start \
    journalnode
   (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

#---------------------------------------------------------
# Start ZK Failover Controllers when automatic failover HA is enabled
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
  # Start the ZKFC daemons
  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    --daemon start \
    zkfc
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

exit ${HADOOP_JUMBO_RETCOUNTER}
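
Note how start-dfs.sh discovers its target hosts with hdfs getconf instead of hard-coding them. The same subcommands can be run by hand to preview what the script will do; the output shown assumes the single-node setup above:

# Hosts that run a NameNode (derived from the cluster configuration)
hdfs getconf -namenodes
# single01

# Hosts that run a SecondaryNameNode; the script maps 0.0.0.0 to $(hostname)
hdfs getconf -secondarynamenodes
# 0.0.0.0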

3. start-yarn.sh

function hadoop_usage
{
  hadoop_generate_usage "${MYNAME}" false
}
# Record the script name for usage messages
MYNAME="${BASH_SOURCE-$0}"
# Resolve the script's directory
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# Source yarn-config.sh, failing with an error if it is missing
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." 2>&1
  exit 1
fi

HADOOP_JUMBO_RETCOUNTER=0

# Start the resourcemanager(s)
HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
if [[ ${HARM} = "false" ]]; then
  # Print status message
  echo "Starting resourcemanager"
  # Launch the resourcemanager daemon
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
else
  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
  logicals=${logicals//,/ }
  for id in ${logicals}
  do
      rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
      RMHOSTS="${RMHOSTS} ${rmhost}"
  done
  echo "Starting resourcemanagers on [${RMHOSTS}]"
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      --workers \
      --hostnames "${RMHOSTS}" \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

# Start the nodemanagers
echo "Starting nodemanagers"
hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
    --config "${HADOOP_CONF_DIR}" \
    --workers \
    --daemon start \
    nodemanager
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))


# Start the proxyserver (if configured)
PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey  yarn.web-proxy.address 2>&- | cut -f1 -d:)
if [[ -n ${PROXYSERVER} ]]; then
 hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --workers \
      --hostnames "${PROXYSERVER}" \
      --daemon start \
      proxyserver
 (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

exit ${HADOOP_JUMBO_RETCOUNTER}
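
Which resourcemanager branch runs is decided entirely by yarn.resourcemanager.ha.enabled. It can be queried exactly as the script does (false is the default on a single-node setup):

hdfs getconf -confKey yarn.resourcemanager.ha.enabled
# false -> start-yarn.sh takes the single-resourcemanager branch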

3. Stopping all Hadoop services ★★★

1. Command usage

# Stop the job history server
mapred --daemon stop historyserver
# Stop the YARN daemons
stop-yarn.sh
#------------------------------------------------------------------
Stopping nodemanagers
Last login: Wed Dec 29 18:55:34 CST 2021 from 192.168.50.1 on pts/0
Stopping resourcemanager
Last login: Wed Dec 29 18:56:30 CST 2021 on pts/0
#------------------------------------------------------------------
# Stop the HDFS daemons
stop-dfs.sh
#------------------------------------------------
Stopping namenodes on [single01]
Last login: Wed Dec 29 18:56:32 CST 2021 on pts/0
Stopping datanodes
Last login: Wed Dec 29 18:57:13 CST 2021 on pts/0
Stopping secondary namenodes [single01]
Last login: Wed Dec 29 18:57:14 CST 2021 on pts/0
#------------------------------------------------
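
Note that the stop order is the reverse of the start order: history server first, then YARN, then HDFS. A hypothetical convenience wrapper that encodes this order (not part of Hadoop):

#!/usr/bin/env bash
# stop-hadoop.sh -- stop services in reverse start order
mapred --daemon stop historyserver
stop-yarn.sh
stop-dfs.sh
# Afterwards, jps should list nothing but Jps itself
jps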

2. stop-dfs.sh

function hadoop_usage
{
  echo "Usage: stop-dfs.sh"
}
# Resolve the script path
this="${BASH_SOURCE-$0}"
# Resolve the script's directory
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# Source hdfs-config.sh, failing with an error if it is missing
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
  exit 1
fi

#---------------------------------------------------------
# Stop the namenodes

NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi
# Print status message
echo "Stopping namenodes on [${NAMENODES}]"
# Stop the NameNode daemon(s)
hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
  --workers \
  --config "${HADOOP_CONF_DIR}" \
  --hostnames "${NAMENODES}" \
  --daemon stop \
  namenode

#---------------------------------------------------------
# datanodes (using default workers file)
# Print status message
echo "Stopping datanodes"

hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
  --workers \
  --config "${HADOOP_CONF_DIR}" \
  --daemon stop \
  datanode

#---------------------------------------------------------
# Stop the secondary namenodes

SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
  SECONDARY_NAMENODES=$(hostname)
fi

if [[ -n "${SECONDARY_NAMENODES}" ]]; then
  # Print status message
  echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
  # Stop the secondarynamenode daemon(s)
  hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${SECONDARY_NAMENODES}" \
    --daemon stop \
    secondarynamenode
fi

#---------------------------------------------------------
# Quorum journal nodes (if any)
JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)

if [[ "${#JOURNAL_NODES}" != 0 ]]; then
  # Print status message
  echo "Stopping journal nodes [${JOURNAL_NODES}]"
  # Stop the journalnode daemons
  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${JOURNAL_NODES}" \
    --daemon stop \
    journalnode
fi

#---------------------------------------------------------
# Stop ZK Failover Controllers when automatic failover HA is enabled
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
  echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"

  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    --daemon stop \
    zkfc
fi
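
stop-dfs.sh ultimately drives every daemon through hdfs --daemon stop. The same subcommand works directly when only one daemon on one host needs attention, for example:

# Bounce just the local DataNode; the rest of the cluster keeps running
hdfs --daemon stop datanode
hdfs --daemon start datanode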

3. stop-yarn.sh

function hadoop_usage
{
  hadoop_generate_usage "${MYNAME}" false
}
# Record the script name for usage messages
MYNAME="${BASH_SOURCE-$0}"

bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# Source yarn-config.sh, failing with an error if it is missing
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." 2>&1
  exit 1
fi

# Print status message
echo "Stopping nodemanagers"
# Stop the nodemanager daemons
hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
    --config "${HADOOP_CONF_DIR}" \
    --workers \
    --daemon stop \
    nodemanager

# Stop the resourcemanager(s)
HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
if [[ ${HARM} = "false" ]]; then
  # Print status message
  echo "Stopping resourcemanager"
  # Stop the resourcemanager daemon
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon stop \
      resourcemanager
else
  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
  logicals=${logicals//,/ }
  for id in ${logicals}
  do
      rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
      RMHOSTS="${RMHOSTS} ${rmhost}"
  done
  echo "Stopping resourcemanagers on [${RMHOSTS}]"
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon stop \
      --workers \
      --hostnames "${RMHOSTS}" \
      resourcemanager
fi

# Stop the proxyserver (if configured)
PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey  yarn.web-proxy.address 2>&- | cut -f1 -d:)
if [[ -n ${PROXYSERVER} ]]; then
  # Print status message
  echo "Stopping proxy server [${PROXYSERVER}]"
  # Stop the proxyserver daemon
  hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --workers \
      --hostnames "${PROXYSERVER}" \
      --daemon stop \
      proxyserver
fi
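
As with HDFS, a single YARN daemon can be controlled directly through yarn --daemon, for example:

# Stop only the local NodeManager
yarn --daemon stop nodemanager
# Confirm which daemons remain
jps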

4. Viewing Hadoop status in a browser

1. Accessing the HDFS web UI

<host IP address>:9870

# URL format
host:9870
# Example
192.168.50.102:9870

2. Accessing the history server (historyserver)

<host IP address>:19888

# URL format
host:19888
# Example
192.168.50.102:19888

3. Application monitoring

<host IP address>:8088

# URL format
host:8088
# Example
192.168.50.102:8088
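
If a page fails to load in the browser, a quick check from the shell helps separate network problems from stopped daemons. A sketch using curl, with the example IP from above:

# An HTTP 200 from each port means that daemon's web UI is serving
curl -s -o /dev/null -w "%{http_code}\n" http://192.168.50.102:9870    # NameNode
curl -s -o /dev/null -w "%{http_code}\n" http://192.168.50.102:19888   # JobHistoryServer
curl -s -o /dev/null -w "%{http_code}\n" http://192.168.50.102:8088    # ResourceManager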