#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop hdfs and ozone daemons.
# Run this on master node.

## @description  usage info
## @audience     private
## @stability    evolving
## @replaceable  no
function hadoop_usage
{
  echo "Usage: start-ozone.sh"
}
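
# Work out where this script lives so sibling scripts (e.g. start-dfs.sh)
# and the libexec directory can be located relative to it.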
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  # shellcheck disable=SC1090
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi
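
# Ozone does not support security-enabled (Kerberos) clusters, so check the
# effective configuration up front and refuse to start if security is on.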
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)

if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
  echo "Ozone is not supported in a security enabled cluster."
  exit 1
fi

#---------------------------------------------------------
# Check if ozone is enabled
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ "${OZONE_ENABLED}" != "true" ]]; then
  echo "Operation is not supported because ozone is not enabled."
  exit 1
fi

#---------------------------------------------------------
# Start hdfs before starting ozone daemons
if [[ -f "${bin}/start-dfs.sh" ]]; then
  "${bin}/start-dfs.sh"
else
  echo "ERROR: Cannot execute ${bin}/start-dfs.sh." >&2
  exit 1
fi

#---------------------------------------------------------
# Ozone keyspacemanager nodes
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
echo "Starting key space manager nodes [${KSM_NODES}]"
if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
  KSM_NODES=$(hostname)
fi
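
# Start a KSM daemon on each of the hosts resolved above.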
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
  --workers \
  --config "${HADOOP_CONF_DIR}" \
  --hostnames "${KSM_NODES}" \
  --daemon start \
  ksm
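
# Remember the exit status of the KSM start; it seeds the script's overall exit code.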
HADOOP_JUMBO_RETCOUNTER=$?

#---------------------------------------------------------
# Ozone storagecontainermanager nodes
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -storagecontainermanagers 2>/dev/null)
echo "Starting storage container manager nodes [${SCM_NODES}]"
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
  --workers \
  --config "${HADOOP_CONF_DIR}" \
  --hostnames "${SCM_NODES}" \
  --daemon start \
  scm
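
# Fold the SCM start status into the running count so the script exits
# non-zero if either daemon group failed to start.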
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))

exit ${HADOOP_JUMBO_RETCOUNTER}