#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on master node.

usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"
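
# Example invocations (hypothetical; any options after the mode flag are
# forwarded to the namenode, e.g. -clusterId):
#   start-dfs.sh              # normal startup of all HDFS daemons
#   start-dfs.sh -upgrade     # starts the namenodes with the -upgrade flag
#   start-dfs.sh -rollback    # starts the datanodes with the -rollback flag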

bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`

DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hdfs-config.sh
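
# hdfs-config.sh initializes the HDFS environment (HADOOP_PREFIX,
# HADOOP_CONF_DIR, and related variables) that the commands below rely on.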

# get arguments
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      nameStartOpt="$startOpt"
      ;;
    -rollback)
      dataStartOpt="$startOpt"
      ;;
    *)
      echo "$usage"
      exit 1
      ;;
  esac
fi

# Add other possible options
nameStartOpt="$nameStartOpt $@"

#---------------------------------------------------------
# namenodes
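
# hadoop-daemons.sh starts the given daemon on each host passed via
# --hostnames (or on every host in the slaves file when --hostnames is
# omitted), typically over ssh.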

NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)

echo "Starting namenodes on [$NAMENODES]"

"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
  --config "$HADOOP_CONF_DIR" \
  --hostnames "$NAMENODES" \
  --script "$bin/hdfs" start namenode $nameStartOpt

#---------------------------------------------------------
# datanodes (using default slaves file)
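
# With HADOOP_SECURE_DN_USER set, datanodes bind privileged ports and must be
# launched as root (via start-secure-dns.sh), so they are skipped here.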

if [ -n "$HADOOP_SECURE_DN_USER" ]; then
  echo \
    "Attempting to start secure cluster, skipping datanodes. " \
    "Run start-secure-dns.sh as root to complete startup."
else
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --script "$bin/hdfs" start datanode $dataStartOpt
fi

#---------------------------------------------------------
# secondary namenodes (if any)
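
# getconf prints an error when no secondary namenodes are configured, so
# stderr is discarded and an empty result simply skips this block.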

SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null)

if [ -n "$SECONDARY_NAMENODES" ]; then
  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$SECONDARY_NAMENODES" \
    --script "$bin/hdfs" start secondarynamenode
fi

#---------------------------------------------------------
# quorumjournal nodes (if any)
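
# In an HA setup, dfs.namenode.shared.edits.dir holds a quorum-journal URI of
# the form qjournal://host1:port;host2:port;.../journalId. `2>&-` closes
# stderr so getconf stays quiet when the key is unset.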

SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-)

case "$SHARED_EDITS_DIR" in
  qjournal://*)
    JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
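    # The sed pipeline strips the scheme and journal ID, splits the host list
    # on ';', and drops the ports. A hypothetical value
    #   qjournal://jn1.example.com:8485;jn2.example.com:8485/mycluster
    # becomes "jn1.example.com jn2.example.com".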
    echo "Starting journal nodes [$JOURNAL_NODES]"

    "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --hostnames "$JOURNAL_NODES" \
      --script "$bin/hdfs" start journalnode
    ;;
esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
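
# When dfs.ha.automatic-failover.enabled is true, a ZooKeeper Failover
# Controller (zkfc) runs alongside each namenode and handles automatic
# failover through ZooKeeper leader election.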

AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start zkfc
fi

# eof