|
@@ -39,11 +39,11 @@ fi
|
|
|
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
|
|
|
# shellcheck disable=SC2034
|
|
|
HADOOP_NEW_CONFIG=true
|
|
|
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
|
|
|
+if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
|
|
|
# shellcheck disable=SC1090
|
|
|
- . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
|
|
|
+ . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
|
|
|
else
|
|
|
- echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
|
|
|
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." >&2
|
|
|
exit 1
|
|
|
fi
|
|
|
|
|
@@ -83,28 +83,6 @@ if [[ "${OZONE_ENABLED}" != "true" ]]; then
|
|
|
exit -1
|
|
|
fi
|
|
|
|
|
|
-#---------------------------------------------------------
|
|
|
-# Start hdfs before starting ozone daemons
|
|
|
-
|
|
|
-#---------------------------------------------------------
|
|
|
-# namenodes
|
|
|
-
|
|
|
-NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
|
|
|
-
|
|
|
-if [[ -z "${NAMENODES}" ]]; then
|
|
|
- NAMENODES=$(hostname)
|
|
|
-fi
|
|
|
-
|
|
|
-echo "Starting namenodes on [${NAMENODES}]"
|
|
|
-hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
|
- --workers \
|
|
|
- --config "${HADOOP_CONF_DIR}" \
|
|
|
- --hostnames "${NAMENODES}" \
|
|
|
- --daemon start \
|
|
|
- namenode ${nameStartOpt}
|
|
|
-
|
|
|
-HADOOP_JUMBO_RETCOUNTER=$?
|
|
|
-
|
|
|
#---------------------------------------------------------
|
|
|
# datanodes (using default workers file)
|
|
|
|
|
@@ -116,68 +94,6 @@ hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
|
|
|
datanode ${dataStartOpt}
|
|
|
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
|
|
|
|
|
|
-#---------------------------------------------------------
|
|
|
-# secondary namenodes (if any)
|
|
|
-
|
|
|
-SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
|
|
|
-
|
|
|
-if [[ -n "${SECONDARY_NAMENODES}" ]]; then
|
|
|
-
|
|
|
- if [[ "${NAMENODES}" =~ , ]]; then
|
|
|
-
|
|
|
- hadoop_error "WARNING: Highly available NameNode is configured."
|
|
|
- hadoop_error "WARNING: Skipping SecondaryNameNode."
|
|
|
-
|
|
|
- else
|
|
|
-
|
|
|
- if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
|
|
|
- SECONDARY_NAMENODES=$(hostname)
|
|
|
- fi
|
|
|
-
|
|
|
- echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
|
|
|
-
|
|
|
- hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
|
- --workers \
|
|
|
- --config "${HADOOP_CONF_DIR}" \
|
|
|
- --hostnames "${SECONDARY_NAMENODES}" \
|
|
|
- --daemon start \
|
|
|
- secondarynamenode
|
|
|
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
|
|
|
- fi
|
|
|
-fi
|
|
|
-
|
|
|
-#---------------------------------------------------------
|
|
|
-# quorumjournal nodes (if any)
|
|
|
-
|
|
|
-JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
|
|
|
-
|
|
|
-if [[ "${#JOURNAL_NODES}" != 0 ]]; then
|
|
|
- echo "Starting journal nodes [${JOURNAL_NODES}]"
|
|
|
-
|
|
|
- hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
|
- --workers \
|
|
|
- --config "${HADOOP_CONF_DIR}" \
|
|
|
- --hostnames "${JOURNAL_NODES}" \
|
|
|
- --daemon start \
|
|
|
- journalnode
|
|
|
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
|
|
|
-fi
|
|
|
-
|
|
|
-#---------------------------------------------------------
|
|
|
-# ZK Failover controllers, if auto-HA is enabled
|
|
|
-AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
|
|
|
-if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
|
|
|
- echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
|
|
|
-
|
|
|
- hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
|
- --workers \
|
|
|
- --config "${HADOOP_CONF_DIR}" \
|
|
|
- --hostnames "${NAMENODES}" \
|
|
|
- --daemon start \
|
|
|
- zkfc
|
|
|
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
|
|
|
-fi
|
|
|
-
|
|
|
#---------------------------------------------------------
|
|
|
# Ozone ozonemanager nodes
|
|
|
OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
|