@@ -50,13 +50,13 @@ function hadoop_deprecate_envvar
   fi
 }
 
-function hadoop_bootstrap_init
+function hadoop_bootstrap
 {
   # NOTE: This function is not user replaceable.
 
   # the root of the Hadoop installation
   # See HADOOP-6255 for the expected directory structure layout
-
+
   # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
   # We can piggyback off of that to figure out where the default
   # HADOOP_FREFIX should be. This allows us to run without
@@ -70,7 +70,7 @@ function hadoop_bootstrap_init
   HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
   HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
   export HADOOP_PREFIX
-
+
   #
   # short-cuts. vendors may redefine these as well, preferably
   # in hadoop-layouts.sh
@@ -88,7 +88,7 @@ function hadoop_bootstrap_init
   TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
 
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
+
   # defaults
   export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
   hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
@@ -146,7 +146,7 @@ function hadoop_exec_userfuncs
 
 function hadoop_exec_hadooprc
 {
-  # Read the user's settings. This provides for users to override
+  # Read the user's settings. This provides for users to override
   # and/or append hadoop-env.sh. It is not meant as a complete system override.
 
   if [[ -f "${HOME}/.hadooprc" ]]; then
@@ -155,6 +155,87 @@ function hadoop_exec_hadooprc
   fi
 }
 
+function hadoop_import_shellprofiles
+{
+  local i
+  local files1
+  local files2
+
+  if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
+    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*)
+  else
+    hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
+  fi
+
+  if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
+    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*)
+  fi
+
+  for i in "${files1[@]}" "${files2[@]}"
+  do
+    if [[ -n "${i}" ]]; then
+      hadoop_debug "Profiles: importing ${i}"
+      . "${i}"
+    fi
+  done
+}
+
+function hadoop_shellprofiles_init
+{
+  local i
+
+  for i in ${HADOOP_SHELL_PROFILES}
+  do
+    if declare -F _${i}_hadoop_init >/dev/null ; then
+      hadoop_debug "Profiles: ${i} init"
+      # shellcheck disable=SC2086
+      _${i}_hadoop_init
+    fi
+  done
+}
+
+function hadoop_shellprofiles_classpath
+{
+  local i
+
+  for i in ${HADOOP_SHELL_PROFILES}
+  do
+    if declare -F _${i}_hadoop_classpath >/dev/null ; then
+      hadoop_debug "Profiles: ${i} classpath"
+      # shellcheck disable=SC2086
+      _${i}_hadoop_classpath
+    fi
+  done
+}
+
+function hadoop_shellprofiles_nativelib
+{
+  local i
+
+  for i in ${HADOOP_SHELL_PROFILES}
+  do
+    if declare -F _${i}_hadoop_nativelib >/dev/null ; then
+      hadoop_debug "Profiles: ${i} nativelib"
+      # shellcheck disable=SC2086
+      _${i}_hadoop_nativelib
+    fi
+  done
+}
+
+function hadoop_shellprofiles_finalize
+{
+  local i
+
+  for i in ${HADOOP_SHELL_PROFILES}
+  do
+    if declare -F _${i}_hadoop_finalize >/dev/null ; then
+      hadoop_debug "Profiles: ${i} finalize"
+      # shellcheck disable=SC2086
+      _${i}_hadoop_finalize
+    fi
+  done
+}
+
 function hadoop_basic_init
 {
   # Some of these are also set in hadoop-env.sh.
@@ -163,37 +244,36 @@ function hadoop_basic_init
   #
   # but it is important to note that if you update these
   # you also need to update hadoop-env.sh as well!!!
-
-  # CLASSPATH initially contains $HADOOP_CONF_DIR
-  CLASSPATH="${HADOOP_CONF_DIR}"
-  hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
-
+
+  CLASSPATH=""
+  hadoop_debug "Initialize CLASSPATH"
+
   if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
      [[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
     export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
   fi
-
+
   # default policy file for service-level authorization
   HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
-
+
   # define HADOOP_HDFS_HOME
   if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
      [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
     export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
   fi
-
+
   # define HADOOP_YARN_HOME
   if [[ -z "${HADOOP_YARN_HOME}" ]] &&
      [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
     export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
   fi
-
+
   # define HADOOP_MAPRED_HOME
   if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
      [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
     export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
   fi
-
+
   HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
   HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
   HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
@@ -240,7 +320,7 @@ function hadoop_rotate_log
   #
   local log=$1;
   local num=${2:-5};
-
+
   if [[ -f "${log}" ]]; then # rotate logs
     while [[ ${num} -gt 1 ]]; do
       #shellcheck disable=SC2086
@@ -260,7 +340,7 @@ function hadoop_actual_ssh
   # should get hostname followed by rest of command line
   local slave=$1
   shift
-
+
   # shellcheck disable=SC2086
   ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
 }
@@ -269,7 +349,7 @@ function hadoop_connect_to_hosts
 {
   # shellcheck disable=SC2124
   local params="$@"
-
+
   #
   # ssh (or whatever) to a host
   #
@@ -278,13 +358,13 @@ function hadoop_connect_to_hosts
     hadoop_error "ERROR: Both HADOOP_SLAVES and HADOOP_SLAVE_NAME were defined. Aborting."
     exit 1
   fi
-
+
   if [[ -n "${HADOOP_SLAVE_NAMES}" ]] ; then
     SLAVE_NAMES=${HADOOP_SLAVE_NAMES}
   else
     SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
   fi
-
+
   # if pdsh is available, let's use it. otherwise default
   # to a loop around ssh. (ugh)
   if [[ -e '/usr/bin/pdsh' ]]; then
@@ -304,7 +384,7 @@ function hadoop_connect_to_hosts
     if [[ -z "${SLAVE_NAMES}" ]]; then
       SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${SLAVE_FILE}")
     fi
-
+
     # quoting here gets tricky. it's easier to push it into a function
     # so that we don't have to deal with it. However...
     # xargs can't use a function so instead we'll export it out
@@ -314,8 +394,8 @@ function hadoop_connect_to_hosts
     export HADOOP_SSH_OPTS
 
     # xargs is used with option -I to replace the placeholder in arguments
-    # list with each hostname read from stdin/pipe. But it consider one
-    # line as one argument while reading from stdin/pipe. So place each
+    # list with each hostname read from stdin/pipe. But it consider one
+    # line as one argument while reading from stdin/pipe. So place each
     # hostname in different lines while passing via pipe.
     SLAVE_NAMES=$(echo "$SLAVE_NAMES" | tr ' ' '\n' )
     echo "${SLAVE_NAMES}" | \
@@ -360,6 +440,12 @@ function hadoop_add_param
   fi
 }
 
+function hadoop_add_profile
+{
+  # shellcheck disable=SC2086
+  hadoop_add_param HADOOP_SHELL_PROFILES $1 $1
+}
+
 function hadoop_add_classpath
 {
   # two params:
@@ -368,7 +454,7 @@ function hadoop_add_classpath
   # classpath this object should go. default is after
   # return 0 = success (added or duplicate)
   # return 1 = failure (doesn't exist, whatever)
-
+
   # However, with classpath (& JLP), we can do dedupe
   # along with some sanity checking (e.g., missing directories)
   # since we have a better idea of what is legal
@@ -381,7 +467,7 @@ function hadoop_add_classpath
       hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
       return 1
     fi
-
+
   # no wildcard in the middle, so check existence
   # (doesn't matter *what* it is)
   elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
@@ -413,7 +499,7 @@ function hadoop_add_colonpath
   # classpath this object should go
   # return 0 = success
   # return 1 = failure (duplicate)
-
+
   # this is CLASSPATH, JLP, etc but with dedupe but no
   # other checking
   if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
@@ -446,101 +532,30 @@ function hadoop_add_ldlibpath
 {
   # specialized function for a common use case
   hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
-
+
   # note that we export this
   export LD_LIBRARY_PATH
 }
 
-function hadoop_add_to_classpath_common
+function hadoop_add_common_to_classpath
 {
-
   #
   # get all of the common jars+config in the path
   #
-
+
   # developers
   if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
    hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
   fi
-
+
   if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
     hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
   fi
-
+
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
 }
 
-function hadoop_add_to_classpath_hdfs
-{
-  #
-  # get all of the hdfs jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
-  fi
-
-  # put hdfs in classpath if present
-  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
-}
-
-function hadoop_add_to_classpath_yarn
-{
-  local i
-  #
-  # get all of the yarn jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
-             yarn-server/yarn-server-nodemanager \
-             yarn-server/yarn-server-common \
-             yarn-server/yarn-server-resourcemanager; do
-      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
-    done
-
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
-  fi
-
-  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
-}
-
-function hadoop_add_to_classpath_mapred
-{
-  #
-  # get all of the mapreduce jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
-  fi
-
-  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
-}
-
 function hadoop_add_to_classpath_userpath
 {
   # Add the user-specified HADOOP_CLASSPATH to the
@@ -563,7 +578,7 @@ function hadoop_add_to_classpath_userpath
       let c+=1
     done
     let j=c-1
-
+
     if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
       if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
         for ((i=j; i>=0; i--)); do
@@ -602,8 +617,8 @@ function hadoop_os_tricks
       # NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it
       # undocumented in hadoop-env.sh because we don't want users to
      # shoot themselves in the foot while devs make IPv6 work.
-      if [[ -n "${bindv6only}" ]] &&
-         [[ "${bindv6only}" -eq "1" ]] &&
+      if [[ -n "${bindv6only}" ]] &&
+         [[ "${bindv6only}" -eq "1" ]] &&
          [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
        hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
        hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
@@ -630,14 +645,14 @@ function hadoop_java_setup
     hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
     exit 1
   fi
-
+
   if [[ ! -d "${JAVA_HOME}" ]]; then
     hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
     exit 1
   fi
-
+
   JAVA="${JAVA_HOME}/bin/java"
-
+
   if [[ ! -x "$JAVA" ]]; then
     hadoop_error "ERROR: $JAVA is not executable."
     exit 1
@@ -716,7 +731,7 @@ function hadoop_finalize_hadoop_opts
 function hadoop_finalize_classpath
 {
   hadoop_add_classpath "${HADOOP_CONF_DIR}" before
-
+
   # user classpath gets added at the last minute. this allows
   # override of CONF dirs and more
   hadoop_add_to_classpath_userpath
@@ -744,8 +759,9 @@ function hadoop_finalize_catalina_opts
 
 function hadoop_finalize
 {
-  # user classpath gets added at the last minute. this allows
-  # override of CONF dirs and more
+
+  hadoop_shellprofiles_finalize
+
   hadoop_finalize_classpath
   hadoop_finalize_libpaths
   hadoop_finalize_hadoop_heap
@@ -782,7 +798,7 @@ function hadoop_verify_secure_prereq
   # if you are on an OS like Illumos that has functional roles
   # and you are using pfexec, you'll probably want to change
   # this.
-
+
   # ${EUID} comes from the shell itself!
   if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
     hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
@@ -795,7 +811,7 @@ function hadoop_verify_secure_prereq
 function hadoop_setup_secure_service
 {
   # need a more complicated setup? replace me!
-
+
   HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
   HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
 }
@@ -844,7 +860,7 @@ function hadoop_verify_logdir
   rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
 }
 
-function hadoop_status_daemon()
+function hadoop_status_daemon()
 {
   #
   # LSB 4.1.0 compatible status command (1)
@@ -861,12 +877,12 @@ function hadoop_status_daemon()
   # them the benefit of a doubt and returning 1 even if
   # our pid is not in in /var/run .
   #
-
+
   local pidfile=$1
   shift
-
+
   local pid
-
+
   if [[ -f "${pidfile}" ]]; then
     pid=$(cat "${pidfile}")
     if ps -p "${pid}" > /dev/null 2>&1; then
@@ -885,7 +901,7 @@ function hadoop_java_exec
   local command=$1
   local class=$2
   shift 2
-
+
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
 
@@ -932,14 +948,14 @@ function hadoop_start_daemon_wrapper
   local counter
 
   hadoop_rotate_log "${outfile}"
-
+
   hadoop_start_daemon "${daemonname}" \
     "$class" \
     "${pidfile}" \
     "$@" >> "${outfile}" 2>&1 < /dev/null &
 
   # we need to avoid a race condition here
-  # so let's wait for the fork to finish
+  # so let's wait for the fork to finish
   # before overriding with the daemonized pid
   (( counter=0 ))
   while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
@@ -953,23 +969,23 @@ function hadoop_start_daemon_wrapper
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Cannot write ${daemonname} pid ${pidfile}."
   fi
-
+
   # shellcheck disable=SC2086
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
   fi
-
+
   # shellcheck disable=SC2086
   disown %+ >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
   fi
   sleep 1
-
+
   # capture the ulimit output
   ulimit -a >> "${outfile}" 2>&1
-
+
   # shellcheck disable=SC2086
   if ! ps -p $! >/dev/null 2>&1; then
     return 1
@@ -983,22 +999,22 @@ function hadoop_start_secure_daemon
   #
   local daemonname=$1
   local class=$2
-
+
   # pid file to create for our deamon
   local daemonpidfile=$3
-
+
   # where to send stdout. jsvc has bad habits so this *may* be &1
   # which means you send it to stdout!
   local daemonoutfile=$4
-
+
   # where to send stderr. same thing, except &2 = stderr
   local daemonerrfile=$5
   local privpidfile=$6
   shift 6
-
+
   hadoop_rotate_log "${daemonoutfile}"
   hadoop_rotate_log "${daemonerrfile}"
-
+
   jsvc="${JSVC_HOME}/jsvc"
   if [[ ! -f "${jsvc}" ]]; then
     hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
@@ -1007,7 +1023,7 @@ function hadoop_start_secure_daemon
     hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
     exit 1
   fi
-
+
   # note that shellcheck will throw a
   # bogus for-our-use-case 2086 here.
   # it doesn't properly support multi-line situations
@@ -1020,7 +1036,7 @@ function hadoop_start_secure_daemon
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}."
   fi
-
+
   exec "${jsvc}" \
     "-Dproc_${daemonname}" \
     -outfile "${daemonoutfile}" \
@@ -1039,29 +1055,29 @@ function hadoop_start_secure_daemon_wrapper
   # of the dirty work to launch a daemon in the background!
   local daemonname=$1
   local class=$2
-
+
   # same rules as hadoop_start_secure_daemon except we
   # have some additional parameters
-
+
   local daemonpidfile=$3
-
+
   local daemonoutfile=$4
-
+
   # the pid file of the subprocess that spawned our
   # secure launcher
   local jsvcpidfile=$5
-
+
   # the output of the subprocess that spawned our secure
   # launcher
   local jsvcoutfile=$6
-
+
   local daemonerrfile=$7
   shift 7
 
   local counter
-
+
   hadoop_rotate_log "${jsvcoutfile}"
-
+
   hadoop_start_secure_daemon \
     "${daemonname}" \
     "${class}" \
@@ -1071,7 +1087,7 @@ function hadoop_start_secure_daemon_wrapper
     "${jsvcpidfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
 
   # we need to avoid a race condition here
-  # so let's wait for the fork to finish
+  # so let's wait for the fork to finish
   # before overriding with the daemonized pid
   (( counter=0 ))
   while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do
@@ -1085,7 +1101,7 @@ function hadoop_start_secure_daemon_wrapper
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Cannot write ${daemonname} pid ${daemonpidfile}."
   fi
-
+
   sleep 1
   #shellcheck disable=SC2086
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
@@ -1118,12 +1134,12 @@ function hadoop_stop_daemon
   local cmd=$1
   local pidfile=$2
   shift 2
-
+
   local pid
-
+
   if [[ -f "${pidfile}" ]]; then
     pid=$(cat "$pidfile")
-
+
     kill "${pid}" >/dev/null 2>&1
     sleep "${HADOOP_STOP_TIMEOUT}"
     if kill -0 "${pid}" > /dev/null 2>&1; then
@@ -1145,7 +1161,7 @@ function hadoop_stop_secure_daemon
   local privpidfile=$3
   shift 3
   local ret
-
+
   hadoop_stop_daemon "${command}" "${daemonpidfile}"
   ret=$?
   rm -f "${daemonpidfile}" "${privpidfile}" 2>/dev/null
@@ -1160,18 +1176,18 @@ function hadoop_daemon_handler
   local daemon_pidfile=$4
   local daemon_outfile=$5
   shift 5
-
+
   case ${daemonmode} in
     status)
      hadoop_status_daemon "${daemon_pidfile}"
      exit $?
    ;;
-
+
    stop)
      hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
      exit $?
    ;;
-
+
    ##COMPAT -- older hadoops would also start daemons by default
    start|default)
      hadoop_verify_piddir
@@ -1207,19 +1223,19 @@ function hadoop_secure_daemon_handler
   local priv_outfile=$7
   local priv_errfile=$8
   shift 8
-
+
   case ${daemonmode} in
    status)
      hadoop_status_daemon "${daemon_pidfile}"
      exit $?
    ;;
-
+
    stop)
      hadoop_stop_secure_daemon "${daemonname}" \
        "${daemon_pidfile}" "${priv_pidfile}"
      exit $?
    ;;
-
+
    ##COMPAT -- older hadoops would also start daemons by default
    start|default)
      hadoop_verify_piddir
@@ -1232,7 +1248,7 @@ function hadoop_secure_daemon_handler
        # stale pid file, so just remove it and continue on
        rm -f "${daemon_pidfile}" >/dev/null 2>&1
      fi
-
+
      ##COMPAT - differenticate between --daemon start and nothing
      # "nothing" shouldn't detach
      if [[ "${daemonmode}" = "default" ]]; then