  1. #!/usr/bin/env bash
  2. # Licensed to the Apache Software Foundation (ASF) under one or more
  3. # contributor license agreements. See the NOTICE file distributed with
  4. # this work for additional information regarding copyright ownership.
  5. # The ASF licenses this file to You under the Apache License, Version 2.0
  6. # (the "License"); you may not use this file except in compliance with
  7. # the License. You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS,
  13. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. # See the License for the specific language governing permissions and
  15. # limitations under the License.
  16. function hadoop_usage
  17. {
  18. echo "Usage: start-balancer.sh [--config confdir] [-policy <policy>] [-threshold <threshold>]"
  19. }
# Resolve the directory containing this script.  BASH_SOURCE is preferred
# over $0 so the path is still correct if the script is sourced; the
# ${BASH_SOURCE-$0} fallback keeps plain-sh invocations working.
this="${BASH_SOURCE-$0}"
# cd -P / pwd -P resolve symlinks so ${bin} is the physical directory.
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
# Prefer the installation root if set; otherwise assume the standard
# layout where libexec sits next to this script's bin/ directory.
if [[ -n "${HADOOP_PREFIX}" ]]; then
  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
else
  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
# Honor a user-provided HADOOP_LIBEXEC_DIR; fall back to the default above.
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
  29. # shellcheck disable=SC2034
  30. HADOOP_NEW_CONFIG=true
  31. if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  32. . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
  33. else
  34. echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
  35. exit 1
  36. fi
  37. #---------------------------------------------------------
  38. # namenodes
  39. NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes)
  40. echo "Stopping namenodes on [$NAMENODES]"
  41. "${bin}/hadoop-daemons.sh" \
  42. --config "${HADOOP_CONF_DIR}" \
  43. --hostnames "${NAMENODES}" \
  44. stop namenode
#---------------------------------------------------------
# datanodes (using default slaves file)

# In a secure (Kerberized) cluster datanodes run under a dedicated user
# and must be stopped as root via stop-secure-dns.sh, so they are skipped
# here.  NOTE(review): HADOOP_SECURE_COMMAND appears to be set by the
# sourced hdfs-config.sh when already running through the secure wrapper —
# confirm against that script.
if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
   [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
  echo \
    "ERROR: Attempting to stop secure cluster, skipping datanodes. " \
    "Run stop-secure-dns.sh as root to complete shutdown."
else
  echo "Stopping datanodes"
  # No --hostnames: hadoop-daemons.sh falls back to its default host list.
  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
fi
#---------------------------------------------------------
# secondary namenodes (if any)

# stderr is discarded: getconf emits diagnostics when no secondary
# namenode is configured (NOTE(review): assumption — verify against
# the getconf implementation).
SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

# A wildcard bind address means "this host": substitute the local
# hostname so hadoop-daemons.sh can address a real machine.
if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
  SECONDARY_NAMENODES=$(hostname)
fi

# Only attempt a stop when at least one secondary namenode is configured.
if [[ -n "${SECONDARY_NAMENODES}" ]]; then
  echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
  "${bin}/hadoop-daemons.sh" \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${SECONDARY_NAMENODES}" \
    stop secondarynamenode
fi
  69. #---------------------------------------------------------
  70. # quorumjournal nodes (if any)
  71. SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
  72. case "${SHARED_EDITS_DIR}" in
  73. qjournal://*)
  74. JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
  75. echo "Stopping journal nodes [${JOURNAL_NODES}]"
  76. "${bin}/hadoop-daemons.sh" \
  77. --config "${HADOOP_CONF_DIR}" \
  78. --hostnames "${JOURNAL_NODES}" \
  79. stop journalnode
  80. ;;
  81. esac
#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled

# Normalize the configured value so "TRUE"/"True" also match below.
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')

if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
  echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
  # zkfc daemons are colocated with the namenodes stopped earlier, so the
  # same NAMENODES host list is reused here.
  "${bin}/hadoop-daemons.sh" \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    stop zkfc
fi
  92. # eof