#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on master node.
function hadoop_usage
{
  echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
}

this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
else
  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
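# hdfs-config.sh bootstraps the HDFS side of the environment (HADOOP_CONF_DIR,
# HADOOP_HDFS_HOME, and related daemon settings) on top of the common Hadoop setup.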
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi
# get arguments
if [[ $# -ge 1 ]]; then
  nameStartOpt="$1"
  shift
  case "$nameStartOpt" in
    -upgrade)
    ;;
    -rollback)
      dataStartOpt="$nameStartOpt"
    ;;
    *)
      hadoop_exit_with_usage 1
    ;;
  esac
fi
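# -upgrade is passed along to the namenodes only; -rollback is also forwarded
# to the datanodes below via dataStartOpt.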

# Add other possible options
nameStartOpt="$nameStartOpt $@"

#---------------------------------------------------------
# namenodes
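# "hdfs getconf -namenodes" prints the namenode hosts derived from the
# configured nameservices; if nothing is configured, fall back to this host.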
NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi

echo "Starting namenodes on [$NAMENODES]"

"${bin}/hadoop-daemons.sh" \
  --config "${HADOOP_CONF_DIR}" \
  --hostnames "${NAMENODES}" \
  start namenode ${nameStartOpt}

#---------------------------------------------------------
# datanodes (using default slaves file)
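# In a secure cluster (HADOOP_SECURE_DN_USER set) the datanodes must be started
# through a privileged launcher; unless HADOOP_SECURE_COMMAND is configured,
# skip them here and let the admin run start-secure-dns.sh as root.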
if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
   [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
  echo "ERROR: Attempting to start secure cluster, skipping datanodes. "
  echo "Run start-secure-dns.sh as root or configure "
  echo "\${HADOOP_SECURE_COMMAND} to complete startup."
else
  echo "Starting datanodes"
  "${bin}/hadoop-daemons.sh" \
    --config "${HADOOP_CONF_DIR}" \
    start datanode ${dataStartOpt}
fi

#---------------------------------------------------------
# secondary namenodes (if any)
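# "hdfs getconf -secondarynamenodes" returns 0.0.0.0 when
# dfs.namenode.secondary.http-address is left at its default; treat that as
# "run the secondary namenode on this host".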
SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
  SECONDARY_NAMENODES=$(hostname)
fi

if [[ -n "${SECONDARY_NAMENODES}" ]]; then
  echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
  "${bin}/hadoop-daemons.sh" \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${SECONDARY_NAMENODES}" \
    start secondarynamenode
fi

#---------------------------------------------------------
# quorumjournal nodes (if any)
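# With quorum-journal HA, dfs.namenode.shared.edits.dir looks like (hypothetical
# hosts) qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster; the sed below strips
# the scheme, path, and ports, leaving a space-separated host list: "jn1 jn2 jn3".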
SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)

case "${SHARED_EDITS_DIR}" in
  qjournal://*)
    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
    echo "Starting journal nodes [${JOURNAL_NODES}]"
    "${bin}/hadoop-daemons.sh" \
      --config "${HADOOP_CONF_DIR}" \
      --hostnames "${JOURNAL_NODES}" \
      start journalnode
  ;;
esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
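# When dfs.ha.automatic-failover.enabled is true, a ZooKeeper failover
# controller (zkfc) is started alongside each namenode.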
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
  "${bin}/hadoop-daemons.sh" \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    start zkfc
fi

# eof