#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on the master node.

usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
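#
# Typical invocations (illustrative):
#   start-dfs.sh              # plain startup of all HDFS daemons
#   start-dfs.sh -upgrade     # upgrade dfs state on the namenodes
#   start-dfs.sh -rollback    # roll the datanodes back to the previous state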

bin=$(dirname "${BASH_SOURCE-$0}")
bin=$(cd "$bin"; pwd)

DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. "$HADOOP_LIBEXEC_DIR/hdfs-config.sh"
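# hdfs-config.sh is expected to define the environment used below,
# in particular HADOOP_PREFIX and HADOOP_CONF_DIR.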

# get arguments
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      # -upgrade is forwarded to the namenodes only
      nameStartOpt="$startOpt"
      ;;
    -rollback)
      # -rollback is forwarded to the datanodes only
      dataStartOpt="$startOpt"
      ;;
    *)
      echo "$usage"
      exit 1
      ;;
  esac
fi
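# With no arguments, both nameStartOpt and dataStartOpt stay empty and a
# plain (non-upgrade) start is performed.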

# Add other possible options
nameStartOpt="$nameStartOpt $*"
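# e.g. "start-dfs.sh -upgrade -clusterId c1" (cluster ID illustrative)
# results in nameStartOpt="-upgrade -clusterId c1"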

#---------------------------------------------------------
# namenodes

NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
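# e.g. NAMENODES="nn1.example.com nn2.example.com" for an HA pair
# (hostnames illustrative; getconf reads them from the configuration)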

echo "Starting namenodes on [$NAMENODES]"

# hadoop-daemons.sh runs the given script command on every host listed
# in --hostnames, so all configured namenodes are started in one pass
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
  --config "$HADOOP_CONF_DIR" \
  --hostnames "$NAMENODES" \
  --script "$bin/hdfs" start namenode $nameStartOpt

#---------------------------------------------------------
# datanodes (using default slaves file)
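#
# Secure datanodes listen on privileged ports and therefore cannot be
# started from here as an unprivileged user; start-secure-dns.sh covers
# that case.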

if [ -n "$HADOOP_SECURE_DN_USER" ]; then
  echo \
    "Attempting to start secure cluster, skipping datanodes. " \
    "Run start-secure-dns.sh as root to complete startup."
else
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --script "$bin/hdfs" start datanode $dataStartOpt
fi

#---------------------------------------------------------
# secondary namenodes (if any)

SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)
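# getconf prints nothing here when no secondary namenode is configured,
# so stderr is discarded and the result is tested for emptiness below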

if [ -n "$SECONDARY_NAMENODES" ]; then
  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$SECONDARY_NAMENODES" \
    --script "$bin/hdfs" start secondarynamenode
fi

#---------------------------------------------------------
# quorumjournal nodes (if any)

SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>/dev/null)
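# a QJM shared-edits setting looks like (values illustrative):
#   qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster
# the sed below strips the scheme, ports and journal ID, leaving only
# the host list: "jn1.example.com jn2.example.com jn3.example.com"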

case "$SHARED_EDITS_DIR" in
  qjournal://*)
    JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
    echo "Starting journal nodes [$JOURNAL_NODES]"

    "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --hostnames "$JOURNAL_NODES" \
      --script "$bin/hdfs" start journalnode
    ;;
esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled

AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
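# the configured value may use arbitrary case, so it is normalized before
# comparing; one ZKFC is started on each namenode host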

if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start zkfc
fi
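
# at this point namenodes, datanodes, secondary namenodes, journal nodes
# and (if enabled) ZKFCs have all been asked to start; the daemon logs
# (typically under $HADOOP_LOG_DIR) show whether each one came up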

# eof