HADOOP-11460. Deprecate shell vars (John Smith via aw)

This commit is contained in:
Allen Wittenauer 2015-02-04 16:35:50 -08:00
parent c6f20007eb
commit 43d5caef5e
13 changed files with 132 additions and 202 deletions

View File

@ -146,6 +146,8 @@ Trunk (Unreleased)
HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results
(Masatake Iwasaki via aw) (Masatake Iwasaki via aw)
HADOOP-11460. Deprecate shell vars (John Smith via aw)
BUG FIXES BUG FIXES
HADOOP-11473. test-patch says "-1 overall" even when all checks are +1 HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

View File

@ -107,8 +107,6 @@ while [[ -z "${_hadoop_common_done}" ]]; do
confdir=$1 confdir=$1
shift shift
if [[ -d "${confdir}" ]]; then if [[ -d "${confdir}" ]]; then
# shellcheck disable=SC2034
YARN_CONF_DIR="${confdir}"
# shellcheck disable=SC2034 # shellcheck disable=SC2034
HADOOP_CONF_DIR="${confdir}" HADOOP_CONF_DIR="${confdir}"
elif [[ -z "${confdir}" ]]; then elif [[ -z "${confdir}" ]]; then

View File

@ -28,6 +28,28 @@ function hadoop_debug
fi fi
} }
## Deprecate the environment variable named by $1 in favor of the
## variable named by $2.  If the old variable is set (non-empty), warn
## the user and copy its value into the new variable.
##
## $1 - name of the deprecated variable (e.g., YARN_CONF_DIR)
## $2 - name of its replacement (e.g., HADOOP_CONF_DIR)
function hadoop_deprecate_envvar
{
  local oldvar=$1
  local newvar=$2
  # Indirect expansion: fetch the *value* of the variable whose name is
  # stored in oldvar.  The '-' default guards against 'set -u' aborts
  # when the old variable was never set at all.
  local oldval=${!oldvar-}

  if [[ -n "${oldval}" ]]; then
    hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}."
    # printf -v assigns to the variable named by ${newvar} without eval,
    # so values containing quotes, backticks, or $(...) cannot be
    # executed as code.  (Also removes the redundant double assignment
    # present in the original.)
    printf -v "${newvar}" '%s' "${oldval}"
  fi
}
function hadoop_bootstrap_init function hadoop_bootstrap_init
{ {
# NOTE: This function is not user replaceable. # NOTE: This function is not user replaceable.
@ -200,8 +222,6 @@ function hadoop_populate_slaves_file()
elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
# shellcheck disable=2034 # shellcheck disable=2034
HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}" HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
# shellcheck disable=2034
YARN_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
else else
hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\"" hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\""
hadoop_exit_with_usage 1 hadoop_exit_with_usage 1

View File

@ -18,10 +18,6 @@
# hadoop-env.sh is read prior to this file. # hadoop-env.sh is read prior to this file.
# #
# KMS logs directory
#
# export KMS_LOG=${HADOOP_LOG_DIR}
# KMS temporary directory # KMS temporary directory
# #
# export KMS_TEMP=${HADOOP_PREFIX}/temp # export KMS_TEMP=${HADOOP_PREFIX}/temp

View File

@ -30,10 +30,11 @@ function hadoop_subproject_init
export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_PREFIX}/temp}" export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_PREFIX}/temp}"
export HADOOP_CONF_DIR="${KMS_CONFIG:-${HADOOP_CONF_DIR}}" hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}"
export HADOOP_LOG_DIR="${KMS_LOG:-${HADOOP_LOG_DIR}}" hadoop_deprecate_envvar KMS_LOG HADOOP_LOG_DIR
export HADOOP_CATALINA_CONFIG="${HADOOP_CONF_DIR}"
export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}" export HADOOP_CATALINA_LOG="${HADOOP_LOG_DIR}"
export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-16000}" export HADOOP_CATALINA_HTTP_PORT="${KMS_HTTP_PORT:-16000}"

View File

@ -34,30 +34,23 @@ function hadoop_subproject_init
# used interchangeable from here on out # used interchangeable from here on out
# ... # ...
# this should get deprecated at some point. # this should get deprecated at some point.
HADOOP_LOG_DIR="${HADOOP_HDFS_LOG_DIR:-$HADOOP_LOG_DIR}"
HADOOP_HDFS_LOG_DIR="${HADOOP_LOG_DIR}" hadoop_deprecate_envvar HADOOP_HDFS_LOG_DIR HADOOP_LOG_DIR
hadoop_deprecate_envvar HADOOP_HDFS_LOGFILE HADOOP_LOGFILE
hadoop_deprecate_envvar HADOOP_HDFS_NICENESS HADOOP_NICENESS
hadoop_deprecate_envvar HADOOP_HDFS_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
HADOOP_LOGFILE="${HADOOP_HDFS_LOGFILE:-$HADOOP_LOGFILE}" hadoop_deprecate_envvar HADOOP_HDFS_PID_DIR HADOOP_PID_DIR
HADOOP_HDFS_LOGFILE="${HADOOP_LOGFILE}"
hadoop_deprecate_envvar HADOOP_HDFS_ROOT_LOGGER HADOOP_ROOT_LOGGER
HADOOP_NICENESS=${HADOOP_HDFS_NICENESS:-$HADOOP_NICENESS}
HADOOP_HDFS_NICENESS="${HADOOP_NICENESS}" hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
HADOOP_STOP_TIMEOUT=${HADOOP_HDFS_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT}
HADOOP_HDFS_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}"
HADOOP_PID_DIR="${HADOOP_HDFS_PID_DIR:-$HADOOP_PID_DIR}"
HADOOP_HDFS_PID_DIR="${HADOOP_PID_DIR}"
HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER}
HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}" HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}"
HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}"
HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}"
# turn on the defaults # turn on the defaults
export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"} export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
@ -66,8 +59,6 @@ function hadoop_subproject_init
export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"} export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"}
export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"} export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"}
export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"} export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"}
} }
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then

View File

@ -34,28 +34,22 @@ function hadoop_subproject_init
# used interchangeable from here on out # used interchangeable from here on out
# ... # ...
# this should get deprecated at some point. # this should get deprecated at some point.
HADOOP_LOG_DIR="${HADOOP_MAPRED_LOG_DIR:-$HADOOP_LOG_DIR}"
HADOOP_MAPRED_LOG_DIR="${HADOOP_LOG_DIR}" hadoop_deprecate_envvar HADOOP_MAPRED_LOG_DIR HADOOP_LOG_DIR
hadoop_deprecate_envvar HADOOP_MAPRED_LOGFILE HADOOP_LOGFILE
HADOOP_LOGFILE="${HADOOP_MAPRED_LOGFILE:-$HADOOP_LOGFILE}" hadoop_deprecate_envvar HADOOP_MAPRED_NICENESS HADOOP_NICENESS
HADOOP_MAPRED_LOGFILE="${HADOOP_LOGFILE}"
HADOOP_NICENESS="${HADOOP_MAPRED_NICENESS:-$HADOOP_NICENESS}" hadoop_deprecate_envvar HADOOP_MAPRED_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
HADOOP_MAPRED_NICENESS="${HADOOP_NICENESS}"
HADOOP_STOP_TIMEOUT="${HADOOP_MAPRED_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT}"
HADOOP_MAPRED_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}"
HADOOP_PID_DIR="${HADOOP_MAPRED_PID_DIR:-$HADOOP_PID_DIR}"
HADOOP_MAPRED_PID_DIR="${HADOOP_PID_DIR}"
HADOOP_ROOT_LOGGER="${HADOOP_MAPRED_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
HADOOP_MAPRED_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
hadoop_deprecate_envvar HADOOP_MAPRED_PID_DIR HADOOP_PID_DIR
hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER
HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}" HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}"
HADOOP_IDENT_STRING="${HADOOP_MAPRED_IDENT_STRING:-$HADOOP_IDENT_STRING}" hadoop_deprecate_envvar HADOOP_MAPRED_IDENT_STRING HADOOP_IDENT_STRING
HADOOP_MAPRED_IDENT_STRING="${HADOOP_IDENT_STRING}"
} }
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then

View File

@ -24,49 +24,22 @@
## MAPRED_xyz > HADOOP_xyz > hard-coded defaults ## MAPRED_xyz > HADOOP_xyz > hard-coded defaults
## ##
###
# Generic settings for MapReduce
###
#Override the log4j settings for all MR apps
# Java property: hadoop.root.logger
# export MAPRED_ROOT_LOGGER="INFO,console"
# Override Hadoop's log directory & file
# Java property: hadoop.log.dir
# export HADOOP_MAPRED_LOG_DIR=""
# Override Hadoop's pid directory
# export HADOOP_MAPRED_PID_DIR=
# Override Hadoop's identity string. $USER by default.
# This is used in writing log and pid files, so keep that in mind!
# Java property: hadoop.id.str
# export HADOOP_MAPRED_IDENT_STRING=$USER
# Override Hadoop's process priority
# Note that sub-processes will also run at this level!
# export HADOOP_MAPRED_NICENESS=0
### ###
# Job History Server specific parameters # Job History Server specific parameters
### ###
# Specify the max heapsize for the JobHistoryServer. If no units are # Specify the max heapsize for the JobHistoryServer. If no units are
# given, it will be assumed to be in MB. # given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS, # This value will be overridden by an Xmx setting specified in HADOOP_OPTS,
# HADOOP_OPTS, and/or HADOOP_JOB_HISTORYSERVER_OPTS. # and/or HADOOP_JOB_HISTORYSERVER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX. # Default is the same as HADOOP_HEAPSIZE_MAX.
#export HADOOP_JOB_HISTORYSERVER_HEAPSIZE= #export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=
# Specify the JVM options to be used when starting the HistoryServer. # Specify the JVM options to be used when starting the HistoryServer.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
#export HADOOP_JOB_HISTORYSERVER_OPTS= #export HADOOP_JOB_HISTORYSERVER_OPTS=
# Specify the log4j settings for the JobHistoryServer # Specify the log4j settings for the JobHistoryServer
# Java property: hadoop.root.logger # Java property: hadoop.root.logger
#export HADOOP_JHS_LOGGER=INFO,RFA #export HADOOP_JHS_LOGGER=INFO,RFA

View File

@ -43,9 +43,9 @@ fi
# start resourceManager # start resourceManager
echo "Starting resourcemanager" echo "Starting resourcemanager"
"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon start resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start resourcemanager
# start nodeManager # start nodeManager
echo "Starting nodemanagers" echo "Starting nodemanagers"
"${bin}/yarn-daemons.sh" --config "${YARN_CONF_DIR}" start nodemanager "${bin}/yarn-daemons.sh" --config "${HADOOP_CONF_DIR}" start nodemanager
# start proxyserver # start proxyserver
#"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon start proxyserver #"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start proxyserver

View File

@ -45,9 +45,9 @@ fi
# stop resourceManager # stop resourceManager
echo "Stopping resourcemanager" echo "Stopping resourcemanager"
"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon stop resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon stop resourcemanager
# stop nodeManager # stop nodeManager
echo "Stopping nodemanagers" echo "Stopping nodemanagers"
"${bin}/yarn-daemons.sh" --config "${YARN_CONF_DIR}" stop nodemanager "${bin}/yarn-daemons.sh" --config "${HADOOP_CONF_DIR}" stop nodemanager
# stop proxyserver # stop proxyserver
#"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon stop proxyserver #"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon stop proxyserver

View File

@ -76,8 +76,8 @@ shift
case "${COMMAND}" in case "${COMMAND}" in
application|applicationattempt|container) application|applicationattempt|container)
CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
set -- "${COMMAND}" "$@" set -- "${COMMAND}" "$@"
;; ;;
classpath) classpath)
@ -85,13 +85,13 @@ case "${COMMAND}" in
;; ;;
daemonlog) daemonlog)
CLASS=org.apache.hadoop.log.LogLevel CLASS=org.apache.hadoop.log.LogLevel
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
jar) jar)
CLASS=org.apache.hadoop.util.RunJar CLASS=org.apache.hadoop.util.RunJar
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
historyserver) historyserver)
supportdaemonization="true" supportdaemonization="true"
@ -102,19 +102,19 @@ case "${COMMAND}" in
;; ;;
logs) logs)
CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
node) node)
CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
nodemanager) nodemanager)
supportdaemonization="true" supportdaemonization="true"
CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager' CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
hadoop_debug "Append YARN_NODEMANAGER_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_NODEMANAGER_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_NODEMANAGER_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_NODEMANAGER_OPTS}"
# Backwards compatibility # Backwards compatibility
if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}" HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
@ -123,8 +123,8 @@ case "${COMMAND}" in
proxyserver) proxyserver)
supportdaemonization="true" supportdaemonization="true"
CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer' CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
hadoop_debug "Append YARN_PROXYSERVER_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_PROXYSERVER_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_PROXYSERVER_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_PROXYSERVER_OPTS}"
# Backwards compatibility # Backwards compatibility
if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}" HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
@ -132,14 +132,14 @@ case "${COMMAND}" in
;; ;;
queue) queue)
CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
resourcemanager) resourcemanager)
supportdaemonization="true" supportdaemonization="true"
CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager' CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
YARN_OPTS="${YARN_OPTS} ${YARN_RESOURCEMANAGER_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto HADOOP_OPTS"
# Backwards compatibility # Backwards compatibility
if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}" HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
@ -147,25 +147,25 @@ case "${COMMAND}" in
;; ;;
rmadmin) rmadmin)
CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI' CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
scmadmin) scmadmin)
CLASS='org.apache.hadoop.yarn.client.SCMAdmin' CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
sharedcachemanager) sharedcachemanager)
supportdaemonization="true" supportdaemonization="true"
CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager' CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
hadoop_debug "Append YARN_SHAREDCACHEMANAGER_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_SHAREDCACHEMANAGER_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_SHAREDCACHEMANAGER_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_SHAREDCACHEMANAGER_OPTS}"
;; ;;
timelineserver) timelineserver)
supportdaemonization="true" supportdaemonization="true"
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer' CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_TIMELINESERVER_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_TIMELINESERVER_OPTS}"
# Backwards compatibility # Backwards compatibility
if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}" HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
@ -173,8 +173,8 @@ case "${COMMAND}" in
;; ;;
version) version)
CLASS=org.apache.hadoop.util.VersionInfo CLASS=org.apache.hadoop.util.VersionInfo
hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS" hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}" HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
;; ;;
*) *)
CLASS="${COMMAND}" CLASS="${COMMAND}"
@ -186,36 +186,25 @@ esac
hadoop_verify_user "${COMMAND}" hadoop_verify_user "${COMMAND}"
# set HADOOP_OPTS to YARN_OPTS so that we can use
# finalize, etc, without doing anything funky
hadoop_debug "Resetting HADOOP_OPTS=YARN_OPTS"
# shellcheck disable=SC2034
HADOOP_OPTS="${YARN_OPTS}"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid" daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
# shellcheck disable=SC2034 # shellcheck disable=SC2034
HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}" HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
YARN_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log" HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
fi fi
# Add YARN custom options to command line in case someone actually # Add YARN custom options to command line in case someone actually
# used these. # used these.
#
# Note that we are replacing ' ' with '\ ' so that when we exec
# stuff it works
#
YARN_LOG_DIR=$HADOOP_LOG_DIR YARN_LOG_DIR=$HADOOP_LOG_DIR
hadoop_translate_cygwin_path YARN_LOG_DIR hadoop_translate_cygwin_path YARN_LOG_DIR
hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${YARN_LOG_DIR/ /\ }" hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${YARN_LOG_DIR}"
hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE/ /\ }" hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
YARN_HOME_DIR=$HADOOP_YARN_HOME YARN_HOME_DIR=$HADOOP_YARN_HOME
hadoop_translate_cygwin_path YARN_HOME_DIR hadoop_translate_cygwin_path YARN_HOME_DIR
hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${YARN_HOME_DIR/ /\ }" hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${YARN_HOME_DIR}"
hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
hadoop_finalize hadoop_finalize

View File

@ -33,49 +33,35 @@ function hadoop_subproject_init
export HADOOP_YARN_ENV_PROCESSED=true export HADOOP_YARN_ENV_PROCESSED=true
fi fi
if [[ -n "${YARN_CONF_DIR}" ]]; then hadoop_deprecate_envvar YARN_CONF_DIR HADOOP_CONF_DIR
HADOOP_CONF_DIR="${YARN_CONF_DIR}"
fi hadoop_deprecate_envvar YARN_LOG_DIR HADOOP_LOG_DIR
hadoop_deprecate_envvar YARN_LOGFILE HADOOP_LOGFILE
YARN_CONF_DIR="${HADOOP_CONF_DIR}" hadoop_deprecate_envvar YARN_NICENESS HADOOP_NICENESS
# YARN_CONF_DIR needs precedence over HADOOP_CONF_DIR hadoop_deprecate_envvar YARN_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
# and the various jar dirs
hadoop_add_classpath "${YARN_CONF_DIR}" before
HADOOP_LOG_DIR="${YARN_LOG_DIR:-$HADOOP_LOG_DIR}" hadoop_deprecate_envvar YARN_PID_DIR HADOOP_PID_DIR
YARN_LOG_DIR="${HADOOP_LOG_DIR}"
HADOOP_LOGFILE="${YARN_LOGFILE:-$HADOOP_LOGFILE}" hadoop_deprecate_envvar YARN_ROOT_LOGGER HADOOP_ROOT_LOGGER
YARN_LOGFILE="${HADOOP_LOGFILE}"
hadoop_deprecate_envvar YARN_IDENT_STRING HADOOP_IDENT_STRING
HADOOP_NICENESS="${YARN_NICENESS:-$HADOOP_NICENESS}"
YARN_NICENESS="${HADOOP_NICENESS}" hadoop_deprecate_envvar YARN_OPTS HADOOP_OPTS
HADOOP_STOP_TIMEOUT="${YARN_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT}" hadoop_deprecate_envvar YARN_SLAVES HADOOP_SLAVES
YARN_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}"
HADOOP_PID_DIR="${YARN_PID_DIR:-$HADOOP_PID_DIR}"
YARN_PID_DIR="${HADOOP_PID_DIR}"
HADOOP_ROOT_LOGGER="${YARN_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}"
YARN_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_PREFIX}" HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_PREFIX}"
HADOOP_IDENT_STRING="${YARN_IDENT_STRING:-$HADOOP_IDENT_STRING}"
YARN_IDENT_STRING="${HADOOP_IDENT_STRING}"
YARN_OPTS="${YARN_OPTS:-$HADOOP_OPTS}"
# YARN-1429 added the completely superfluous YARN_USER_CLASSPATH # YARN-1429 added the completely superfluous YARN_USER_CLASSPATH
# env var. We're going to override HADOOP_USER_CLASSPATH to keep # env var. We're going to override HADOOP_USER_CLASSPATH to keep
# consistency with the rest of the duplicate/useless env vars # consistency with the rest of the duplicate/useless env vars
HADOOP_USER_CLASSPATH="${YARN_USER_CLASSPATH:-$HADOOP_USER_CLASSPATH}"
YARN_USER_CLASSPATH="${HADOOP_USER_CLASSPATH}" hadoop_deprecate_envvar YARN_USER_CLASSPATH HADOOP_USER_CLASSPATH
HADOOP_USER_CLASSPATH_FIRST="${YARN_USER_CLASSPATH_FIRST:-$HADOOP_USER_CLASSPATH_FIRST}" hadoop_deprecate_envvar YARN_USER_CLASSPATH_FIRST HADOOP_USER_CLASSPATH_FIRST
YARN_USER_CLASSPATH_FIRST="${HADOOP_USER_CLASSPATH_FIRST}"
} }
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then

View File

@ -25,40 +25,20 @@
## YARN_xyz > HADOOP_xyz > hard-coded defaults ## YARN_xyz > HADOOP_xyz > hard-coded defaults
## ##
###
# YARN-specific overrides for generic settings
###
# By default, YARN will use HADOOP_LOG_DIR for YARN logging. Specify a custom
# log directory for YARN things here:
# Java properties: hadoop.log.dir, yarn.log.dir
# export YARN_LOG_DIR="${HADOOP_LOG_DIR}"
# By default, YARN will use the value of HADOOP_LOGFILE as the 'fallback' log
# file when log4j settings are not defined. Specify a custom YARN log file
# here:
# Java properties: hadoop.log.file, yarn.log.file
# export YARN_LOGFILE=${HADOOP_LOGFILE}
# Override the log4j settings for all YARN apps. By default, YARN will use
# HADOOP_ROOT_LOGGER.
# Java properties: hadoop.root.logger, yarn.root.logger
# export YARN_ROOT_LOGGER=${HADOOP_ROOT_LOGGER}
### ###
# Resource Manager specific parameters # Resource Manager specific parameters
### ###
# Specify the max heapsize for the ResourceManager. If no units are # Specify the max heapsize for the ResourceManager. If no units are
# given, it will be assumed to be in MB. # given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS, # This value will be overridden by an Xmx setting specified in either
# HADOOP_OPTS, and/or YARN_RESOURCEMANAGER_OPTS. # HADOOP_OPTS and/or YARN_RESOURCEMANAGER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX # Default is the same as HADOOP_HEAPSIZE_MAX
#export YARN_RESOURCEMANAGER_HEAPSIZE= #export YARN_RESOURCEMANAGER_HEAPSIZE=
# Specify the JVM options to be used when starting the ResourceManager. # Specify the JVM options to be used when starting the ResourceManager.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
# #
# Examples for a Sun/Oracle JDK: # Examples for a Sun/Oracle JDK:
# a) override the appsummary log file: # a) override the appsummary log file:
@ -82,14 +62,14 @@
# Specify the max heapsize for the NodeManager. If no units are # Specify the max heapsize for the NodeManager. If no units are
# given, it will be assumed to be in MB. # given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS, # This value will be overridden by an Xmx setting specified in either
# HADOOP_OPTS, and/or YARN_NODEMANAGER_OPTS. # HADOOP_OPTS and/or YARN_NODEMANAGER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX. # Default is the same as HADOOP_HEAPSIZE_MAX.
#export YARN_NODEMANAGER_HEAPSIZE= #export YARN_NODEMANAGER_HEAPSIZE=
# Specify the JVM options to be used when starting the NodeManager. # Specify the JVM options to be used when starting the NodeManager.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
# #
# See ResourceManager for some examples # See ResourceManager for some examples
# #
@ -101,14 +81,14 @@
# Specify the max heapsize for the timelineserver. If no units are # Specify the max heapsize for the timelineserver. If no units are
# given, it will be assumed to be in MB. # given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS, # This value will be overridden by an Xmx setting specified in either
# HADOOP_OPTS, and/or YARN_TIMELINESERVER_OPTS. # HADOOP_OPTS and/or YARN_TIMELINESERVER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX. # Default is the same as HADOOP_HEAPSIZE_MAX.
#export YARN_TIMELINE_HEAPSIZE= #export YARN_TIMELINE_HEAPSIZE=
# Specify the JVM options to be used when starting the TimeLineServer. # Specify the JVM options to be used when starting the TimeLineServer.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
# #
# See ResourceManager for some examples # See ResourceManager for some examples
# #
@ -120,14 +100,14 @@
# Specify the max heapsize for the web app proxy server. If no units are # Specify the max heapsize for the web app proxy server. If no units are
# given, it will be assumed to be in MB. # given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS, # This value will be overridden by an Xmx setting specified in either
# HADOOP_OPTS, and/or YARN_PROXYSERVER_OPTS. # HADOOP_OPTS and/or YARN_PROXYSERVER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX. # Default is the same as HADOOP_HEAPSIZE_MAX.
#export YARN_PROXYSERVER_HEAPSIZE= #export YARN_PROXYSERVER_HEAPSIZE=
# Specify the JVM options to be used when starting the proxy server. # Specify the JVM options to be used when starting the proxy server.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
# #
# See ResourceManager for some examples # See ResourceManager for some examples
# #
@ -138,8 +118,8 @@
### ###
# Specify the JVM options to be used when starting the # Specify the JVM options to be used when starting the
# shared cache manager server. # shared cache manager server.
# These options will be appended to the options specified as YARN_OPTS # These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in YARN_OPTS # and therefore may override any similar flags set in HADOOP_OPTS
# #
# See ResourceManager for some examples # See ResourceManager for some examples
# #