start-dfs.sh

#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on the master node.
#
## startup matrix:
#
# if $EUID != 0, then exec
# if $EUID == 0, then
#    if hdfs_subcmd_user is defined, su to that user, exec
#    if hdfs_subcmd_user is not defined, error
#
# For secure daemons, this means both the secure and insecure env vars need to
# be defined. e.g., HDFS_DATANODE_USER=root HADOOP_SECURE_DN_USER=hdfs

## @description usage info
## @audience private
## @stability evolving
## @replaceable no
function hadoop_usage
{
  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
}

this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
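
# Example (hypothetical install layout): invoking
#   HADOOP_LIBEXEC_DIR=/opt/hadoop/libexec ./sbin/start-dfs.sh
# keeps the pre-set value; otherwise the libexec directory next to this
# script's parent directory is used.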

# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi

# get arguments
if [[ $# -ge 1 ]]; then
  startOpt="$1"
  shift
  case "$startOpt" in
    -upgrade)
      nameStartOpt="$startOpt"
    ;;
    -rollback)
      dataStartOpt="$startOpt"
    ;;
    *)
      hadoop_exit_with_usage 1
    ;;
  esac
fi

# Add other possible options
nameStartOpt="$nameStartOpt $*"
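
# Example invocations (illustrative):
#   start-dfs.sh              # plain start
#   start-dfs.sh -upgrade     # pass -upgrade to the namenodes
#   start-dfs.sh -rollback    # pass -rollback to the datanodes
# Any remaining arguments are appended to the namenode start options above.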

#---------------------------------------------------------
# namenodes

NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi

echo "Starting namenodes on [${NAMENODES}]"

hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    --daemon start \
    namenode ${nameStartOpt}

HADOOP_JUMBO_RETCOUNTER=$?
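
# The worker-mode launch above is roughly equivalent to running, on each
# namenode host (a sketch, assuming a default layout):
#   "${HADOOP_HDFS_HOME}/bin/hdfs" --config "${HADOOP_CONF_DIR}" --daemon start namenode
# with hadoop_uservar_su additionally switching to HDFS_NAMENODE_USER when
# this script is run as root.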

#---------------------------------------------------------
# datanodes (using default workers file)

echo "Starting datanodes"

hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --daemon start \
    datanode ${dataStartOpt}

(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
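
# The "default workers file" is typically ${HADOOP_CONF_DIR}/workers (one host
# per line); no --hostnames override is passed, so a datanode is started on
# every listed worker.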

#---------------------------------------------------------
# secondary namenodes (if any)

SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [[ -n "${SECONDARY_NAMENODES}" ]]; then

  if [[ "${NAMENODES}" =~ , ]]; then

    hadoop_error "WARNING: Highly available NameNode is configured."
    hadoop_error "WARNING: Skipping SecondaryNameNode."

  else
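    # getconf reports 0.0.0.0 when the secondary namenode HTTP address is left
    # at its wildcard default, so fall back to this host's name to give
    # --hostnames a concrete target.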
    if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
      SECONDARY_NAMENODES=$(hostname)
    fi

    echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"

    hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
        --workers \
        --config "${HADOOP_CONF_DIR}" \
        --hostnames "${SECONDARY_NAMENODES}" \
        --daemon start \
        secondarynamenode

    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
  fi
fi

#---------------------------------------------------------
# quorumjournal nodes (if any)

SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)

case "${SHARED_EDITS_DIR}" in
  qjournal://*)
    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
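    # e.g. (illustrative hosts) qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster
    # becomes "jn1 jn2 jn3": the sed strips the scheme and journal ID, splits
    # the host list on ';', and drops the port numbers.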

    echo "Starting journal nodes [${JOURNAL_NODES}]"

    hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
        --workers \
        --config "${HADOOP_CONF_DIR}" \
        --hostnames "${JOURNAL_NODES}" \
        --daemon start \
        journalnode

    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
  ;;
esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled

AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')

if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"

  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
      --workers \
      --config "${HADOOP_CONF_DIR}" \
      --hostnames "${NAMENODES}" \
      --daemon start \
      zkfc

  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

exit ${HADOOP_JUMBO_RETCOUNTER}

# eof