#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on the master node.

## startup matrix:
#
# if $EUID != 0, then exec
# if $EUID = 0, then
#   if hdfs_subcmd_user is defined, su to that user, exec
#   if hdfs_subcmd_user is not defined, error
#
# For secure daemons, this means both the secure and insecure env vars need to
# be defined, e.g., HDFS_DATANODE_USER=root HADOOP_SECURE_DN_USER=hdfs
#
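# For illustration only (user names are site-specific assumptions): when this
# script is run as root, the per-daemon users are typically exported from
# hadoop-env.sh, e.g.:
#
#   export HDFS_NAMENODE_USER=hdfs
#   export HDFS_SECONDARYNAMENODE_USER=hdfs
#   export HDFS_DATANODE_USER=hdfs
#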

## @description  usage info
## @audience     private
## @stability    evolving
## @replaceable  no
function hadoop_usage
{
  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
}

this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
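# A HADOOP_LIBEXEC_DIR already set in the environment wins over the default
# computed above; illustrative only (the path is an assumption):
#   HADOOP_LIBEXEC_DIR=/opt/hadoop/libexec ./start-dfs.sh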
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
- echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
  exit 1
fi

# get arguments
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      nameStartOpt="$startOpt"
    ;;
    -rollback)
      dataStartOpt="$startOpt"
    ;;
    *)
      hadoop_exit_with_usage 1
    ;;
  esac
fi

# Add other possible options
nameStartOpt="$nameStartOpt $*"
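# Illustrative invocations (only the options handled above):
#   start-dfs.sh              # normal startup
#   start-dfs.sh -upgrade     # forwarded to the namenodes
#   start-dfs.sh -rollback    # forwarded to the datanodes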

#---------------------------------------------------------
# namenodes

NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi
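# In a non-HA setup "hdfs getconf -namenodes" usually yields the single
# NameNode host from the configuration; with HA or federation it yields the
# full space-separated list of NameNode hosts.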
- echo "Starting namenodes on [${NAMENODES}]"
- hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
- --workers \
- --config "${HADOOP_CONF_DIR}" \
- --hostnames "${NAMENODES}" \
- --daemon start \
- namenode ${nameStartOpt}
- HADOOP_JUMBO_RETCOUNTER=$?
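# HADOOP_JUMBO_RETCOUNTER accumulates the exit status of each daemon group
# started below, so the script exits non-zero if any group failed to start.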

#---------------------------------------------------------
# datanodes (using default workers file)
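# No --hostnames list is passed here, so the DataNode hosts are typically read
# from the workers file (one hostname per line, ${HADOOP_CONF_DIR}/workers).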
- echo "Starting datanodes"
- hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
- --workers \
- --config "${HADOOP_CONF_DIR}" \
- --daemon start \
- datanode ${dataStartOpt}
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))

#---------------------------------------------------------
# secondary namenodes (if any)

SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
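# The value is derived from dfs.namenode.secondary.http-address; with the
# default configuration it typically comes back as 0.0.0.0, which is why the
# local hostname is substituted below.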
- if [[ -n "${SECONDARY_NAMENODES}" ]]; then
- if [[ "${NAMENODES}" =~ , ]]; then
- hadoop_error "WARNING: Highly available NameNode is configured."
- hadoop_error "WARNING: Skipping SecondaryNameNode."
- else
- if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
- SECONDARY_NAMENODES=$(hostname)
- fi
- echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
- hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
- --workers \
- --config "${HADOOP_CONF_DIR}" \
- --hostnames "${SECONDARY_NAMENODES}" \
- --daemon start \
- secondarynamenode
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
- fi
- fi

#---------------------------------------------------------
# quorumjournal nodes (if any)

SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)

case "${SHARED_EDITS_DIR}" in
  qjournal://*)
    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
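    # Worked example (hostnames are illustrative): a shared edits dir of
    #   qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster
    # reduces to "jn1.example.com jn2.example.com jn3.example.com".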
- echo "Starting journal nodes [${JOURNAL_NODES}]"
- hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
- --workers \
- --config "${HADOOP_CONF_DIR}" \
- --hostnames "${JOURNAL_NODES}" \
- --daemon start \
- journalnode
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
- ;;
- esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
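# Automatic failover is typically enabled in hdfs-site.xml, for example
# (illustrative snippet):
#   <property>
#     <name>dfs.ha.automatic-failover.enabled</name>
#     <value>true</value>
#   </property>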
- if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
- echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
- hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
- --workers \
- --config "${HADOOP_CONF_DIR}" \
- --hostnames "${NAMENODES}" \
- --daemon start \
- zkfc
- (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
- fi
- exit ${HADOOP_JUMBO_RETCOUNTER}
- # eof