@@ -65,7 +65,7 @@ fi
 
 #Add other possible options
-nameStartOpt="$nameStartOpt $@"
+nameStartOpt="$nameStartOpt $*"
 
 #---------------------------------------------------------
 # namenodes
 
@@ -76,28 +76,32 @@ if [[ -z "${NAMENODES}" ]]; then
   NAMENODES=$(hostname)
 fi
 
-echo "Starting namenodes on [$NAMENODES]"
+echo "Starting namenodes on [${NAMENODES}]"
 
-"${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${NAMENODES}" \
-  start namenode ${nameStartOpt}
+"${HADOOP_HDFS_HOME}/bin/hdfs" \
+  --slaves \
+  --config "${HADOOP_CONF_DIR}" \
+  --hostnames "${NAMENODES}" \
+  --daemon start \
+  namenode ${nameStartOpt}
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
 if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
-[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-  echo "ERROR: Attempting to start secure cluster, skipping datanodes. "
-  echo "Run start-secure-dns.sh as root or configure "
-  echo "\${HADOOP_SECURE_COMMAND} to complete startup."
+   [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+  hadoop_error "ERROR: Attempting to start secure cluster, skipping datanodes. "
+  hadoop_error "ERROR: Run start-secure-dns.sh as root or configure "
+  hadoop_error "ERROR: \${HADOOP_SECURE_COMMAND} to complete startup."
 else
-
+
   echo "Starting datanodes"
-
-  "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    start datanode ${dataStartOpt}
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --daemon start \
+    datanode ${dataStartOpt}
 fi
 
 #---------------------------------------------------------
@@ -105,17 +109,28 @@ fi
 
 SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
-  SECONDARY_NAMENODES=$(hostname)
-fi
-
 if [[ -n "${SECONDARY_NAMENODES}" ]]; then
-  echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
-
-  "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${SECONDARY_NAMENODES}" \
-    start secondarynamenode
+
+  if [[ "${NAMENODES}" =~ , ]]; then
+
+    hadoop_error "ERROR: Highly available NameNode is configured."
+    hadoop_error "ERROR: Skipping SecondaryNameNode."
+
+  else
+
+    if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+      SECONDARY_NAMENODES=$(hostname)
+    fi
+
+    echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+
+    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --slaves \
+      --config "${HADOOP_CONF_DIR}" \
+      --hostnames "${SECONDARY_NAMENODES}" \
+      --daemon start \
+      secondarynamenode
+  fi
 fi
 
 #---------------------------------------------------------
@@ -127,10 +142,13 @@ case "${SHARED_EDITS_DIR}" in
 qjournal://*)
   JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
   echo "Starting journal nodes [${JOURNAL_NODES}]"
-  "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${JOURNAL_NODES}" \
-    start journalnode
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    --daemon start \
+    journalnode
 ;;
 esac
 
@@ -139,10 +157,13 @@ esac
 AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
 if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
   echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
-  "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${NAMENODES}" \
-    start zkfc
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${NAMENODES}" \
+    --daemon start \
+    zkfc
 fi
 
 # eof