start-yarn.sh

#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
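
# Start all YARN daemons: the ResourceManager(s), the NodeManagers on the
# worker hosts and, if configured, the web app proxy server.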

## @description  usage info
## @audience     private
## @stability    evolving
## @replaceable  no
function hadoop_usage
{
  hadoop_generate_usage "${MYNAME}" false
}
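
# MYNAME is the path used to invoke this script; bin is its physical
# directory with any symlinks resolved.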
MYNAME="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
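
# Source yarn-config.sh from libexec to set up the Hadoop/YARN shell
# environment; abort immediately if it cannot be found.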
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." >&2
  exit 1
fi
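
# HADOOP_JUMBO_RETCOUNTER accumulates the exit status of every daemon start
# below, so the script exits non-zero if any of them fails.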
HADOOP_JUMBO_RETCOUNTER=0

# start resourceManager
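# If ResourceManager HA is disabled, start a single resourcemanager on this
# host; otherwise resolve the hostname of every configured rm-id and start a
# resourcemanager on each of those hosts via --workers.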
HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
if [[ ${HARM} = "false" ]]; then
  echo "Starting resourcemanager"
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
else
  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
  logicals=${logicals//,/ }
  for id in ${logicals}
  do
    rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
    RMHOSTS="${RMHOSTS} ${rmhost}"
  done
  echo "Starting resourcemanagers on [${RMHOSTS}]"
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      --workers \
      --hostnames "${RMHOSTS}" \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi
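
# NodeManagers are started on every host listed in the workers file
# (hence --workers with no --hostnames).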
# start nodemanager
echo "Starting nodemanagers"
hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
    --config "${HADOOP_CONF_DIR}" \
    --workers \
    --daemon start \
    nodemanager
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
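
# The web app proxy server is started only when yarn.web-proxy.address is
# configured; when it is unset, the proxy is typically run inside the
# ResourceManager and no separate daemon is needed.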
# start proxyserver
PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.web-proxy.address 2>&- | cut -f1 -d:)
if [[ -n ${PROXYSERVER} ]]; then
  hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --workers \
      --hostnames "${PROXYSERVER}" \
      --daemon start \
      proxyserver
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

exit ${HADOOP_JUMBO_RETCOUNTER}