
HDDS-222. Remove hdfs command line from ozone distribution.
Contributed by Elek, Marton.

(cherry picked from commit 7b5886bf784579cc97656266901e6f934522b0e8)

Anu Engineer, 6 years ago
parent commit 0d3406e25c

dev-support/bin/ozone-dist-layout-stitching (+4, -2)

@@ -127,8 +127,6 @@ run cp -p "${ROOT}/README.txt" .
 # Copy hadoop-common first so that it always has all dependencies.
 # Remaining projects will copy only libraries which are not already present in the 'share' directory.
 run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
-run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
-run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
 
 
 # HDDS
@@ -151,11 +149,15 @@ cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
 
+rm sbin/*all.sh
+rm sbin/*all.cmd
+
 #Copy docker compose files
 run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn
+mkdir -p ./share/hadoop/hdfs
 echo
 echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
 echo
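
Taken together, this hunk stops copying the HDFS project trees into the Ozone distribution, deletes the cluster-wide *all.sh/*all.cmd wrappers from sbin, and still creates an empty share/hadoop/hdfs directory so that classpath construction which expects that directory keeps working. A minimal post-build smoke test, as a sketch (not part of the commit; BASEDIR is the variable the stitching script already uses):

```bash
#!/usr/bin/env bash
# Hypothetical sanity check of the stitched Ozone layout after HDDS-222.
cd "${BASEDIR}/ozone" || exit 1

# The placeholder directory must exist even though nothing is copied into it.
test -d share/hadoop/hdfs && echo "OK: share/hadoop/hdfs present"

# The cluster-wide start/stop wrappers were removed from sbin.
if ! ls sbin/*all.sh sbin/*all.cmd >/dev/null 2>&1; then
  echo "OK: no *all.sh / *all.cmd wrappers left"
fi
```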

hadoop-hdds/client/pom.xml (+0, -1)

@@ -38,7 +38,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
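
Removing <scope>provided</scope> here (and in the sibling modules below) lets the dependency fall back to Maven's default compile scope, so the jar is resolved and bundled with the distribution instead of being expected on an externally supplied HDFS classpath. A hedged way to verify the effective scope from the source tree (the version in the sample output is illustrative):

```bash
# List resolved dependencies of hadoop-hdds-client and inspect the scope of
# hadoop-hdds-common; after this change it should be reported as 'compile'.
mvn -q dependency:list -pl hadoop-hdds/client | grep hadoop-hdds-common
# e.g.: org.apache.hadoop:hadoop-hdds-common:jar:0.2.1-SNAPSHOT:compile
```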

hadoop-hdds/container-service/pom.xml (+0, -2)

@@ -37,12 +37,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>

hadoop-hdds/framework/pom.xml (+0, -1)

@@ -37,7 +37,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>

hadoop-hdds/pom.xml (+0, -3)

@@ -44,17 +44,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
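
Note that hadoop-hdfs and hadoop-hdfs-client stay on as dependencies; only their scope changes. The HDFS code therefore still ships, but as ordinary library jars of the HDDS modules rather than as a full HDFS install with its own command line. An illustrative check against the stitched layout (same BASEDIR assumption as in the sketch above):

```bash
# HDFS jars should appear as bundled libraries under share/ even though the
# hdfs CLI and the wholesale HDFS project copies are gone (illustrative).
find "${BASEDIR}/ozone/share" -name 'hadoop-hdfs-*.jar' -print
test ! -e "${BASEDIR}/ozone/bin/hdfs" && echo "OK: no hdfs CLI in the dist"
```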

hadoop-hdds/server-scm/pom.xml (+0, -4)

@@ -37,25 +37,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-client</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>

hadoop-ozone/common/src/main/bin/ozone (+4, -4)

@@ -144,11 +144,11 @@ fi
 HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
-  # shellcheck source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
-  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
+  # shellcheck source=./hadoop-ozone/common/src/main/bin/ozone-config.sh
+  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
 else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." >&2
   exit 1
 fi
 
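The launcher is thereby decoupled from hdfs-config.sh entirely and fails fast when the Ozone libexec helper is absent. An illustrative reproduction (the version subcommand stands in for any ozone command; the override path is hypothetical):

```bash
# Point the launcher at a libexec directory without ozone-config.sh:
HADOOP_LIBEXEC_DIR=/nonexistent bin/ozone version
# ERROR: Cannot execute /nonexistent/ozone-config.sh.
```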

hadoop-ozone/common/src/main/bin/ozone-config.sh (+51, -0)

@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the ozone scripts with source command
+# should not be executed directly
+
+function hadoop_subproject_init
+{
+  if [[ -z "${HADOOP_OZONE_ENV_PROCESSED}" ]]; then
+    if [[ -e "${HADOOP_CONF_DIR}/ozone-env.sh" ]]; then
+      . "${HADOOP_CONF_DIR}/ozone-env.sh"
+      export HADOOP_OZONE_ENV_PROCESSED=true
+    fi
+  fi
+  HADOOP_OZONE_HOME="${HADOOP_OZONE_HOME:-$HADOOP_HOME}"
+
+}
+
+if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+  _hd_this="${BASH_SOURCE-$0}"
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
+fi
+
+# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+else
+  echo "ERROR: Hadoop common not found." >&2
+  exit 1
+fi
+
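
The new helper mirrors hdfs-config.sh: it sources ozone-env.sh at most once, defaults HADOOP_OZONE_HOME to HADOOP_HOME, and then locates hadoop-config.sh via HADOOP_COMMON_HOME, the libexec directory, or HADOOP_HOME, in that order. A minimal sketch of a consumer, assuming a hypothetical install path (this is what bin/ozone effectively does):

```bash
#!/usr/bin/env bash
# Illustrative consumer of ozone-config.sh; /opt/ozone is an assumed path.
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-/opt/ozone/libexec}"
. "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
echo "HADOOP_OZONE_HOME=${HADOOP_OZONE_HOME}"
```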

hadoop-ozone/common/src/main/bin/start-ozone.sh (+3, -87)

@@ -39,11 +39,11 @@ fi
 HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
   # shellcheck disable=SC1090
-  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
 else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." >&2
   exit 1
 fi
 
@@ -83,28 +83,6 @@ if [[ "${OZONE_ENABLED}" != "true" ]]; then
   exit -1
 fi
 
-#---------------------------------------------------------
-# Start hdfs before starting ozone daemons
-
-#---------------------------------------------------------
-# namenodes
-
-NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
-
-if [[ -z "${NAMENODES}" ]]; then
-  NAMENODES=$(hostname)
-fi
-
-echo "Starting namenodes on [${NAMENODES}]"
-hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
-    --workers \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${NAMENODES}" \
-    --daemon start \
-    namenode ${nameStartOpt}
-
-HADOOP_JUMBO_RETCOUNTER=$?
-
 #---------------------------------------------------------
 # datanodes (using default workers file)
 
@@ -116,68 +94,6 @@ hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \
     datanode ${dataStartOpt}
 (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 
-#---------------------------------------------------------
-# secondary namenodes (if any)
-
-SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
-
-if [[ -n "${SECONDARY_NAMENODES}" ]]; then
-
-  if [[ "${NAMENODES}" =~ , ]]; then
-
-    hadoop_error "WARNING: Highly available NameNode is configured."
-    hadoop_error "WARNING: Skipping SecondaryNameNode."
-
-  else
-
-    if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
-      SECONDARY_NAMENODES=$(hostname)
-    fi
-
-    echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
-
-    hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
-      --workers \
-      --config "${HADOOP_CONF_DIR}" \
-      --hostnames "${SECONDARY_NAMENODES}" \
-      --daemon start \
-      secondarynamenode
-    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-  fi
-fi
-
-#---------------------------------------------------------
-# quorumjournal nodes (if any)
-
-JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
-
-if [[ "${#JOURNAL_NODES}" != 0 ]]; then
-  echo "Starting journal nodes [${JOURNAL_NODES}]"
-
-  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
-    --workers \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${JOURNAL_NODES}" \
-    --daemon start \
-    journalnode
-   (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-fi
-
-#---------------------------------------------------------
-# ZK Failover controllers, if auto-HA is enabled
-AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
-if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
-  echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
-
-  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
-    --workers \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${NAMENODES}" \
-    --daemon start \
-    zkfc
-  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-fi
-
 #---------------------------------------------------------
 # Ozone ozonemanager nodes
 OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
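
After this hunk, start-ozone.sh no longer touches NameNodes, SecondaryNameNodes, JournalNodes, or ZKFCs: it starts only the Ozone datanodes and then the OzoneManager and SCM nodes. (With the HADOOP_JUMBO_RETCOUNTER=$? initializer removed, the counter starts unset, which bash arithmetic treats as 0.) If HDFS is deployed alongside Ozone, it now has to be started from the Hadoop distribution separately, e.g. (illustrative):

```bash
# Start HDFS (if used at all) from the regular Hadoop distribution first...
"${HADOOP_HOME}/sbin/start-dfs.sh"
# ...then start the Ozone daemons (datanodes, OM, SCM) from the Ozone dist.
"${HADOOP_OZONE_HOME}/sbin/start-ozone.sh"
```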

hadoop-ozone/common/src/main/bin/stop-ozone.sh (+3, -3)

@@ -39,11 +39,11 @@ fi
 HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then
   # shellcheck disable=SC1090
-  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+  . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh"
 else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." >&2
   exit 1
 fi
 

hadoop-ozone/objectstore-service/pom.xml (+0, -2)

@@ -38,13 +38,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-client</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>

hadoop-ozone/ozone-manager/pom.xml (+0, -2)

@@ -38,13 +38,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-client</artifactId>
-      <scope>provided</scope>
     </dependency>
 
     <dependency>

hadoop-ozone/pom.xml (+0, -9)

@@ -49,47 +49,38 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-common</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-server-framework</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-client</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-tools</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>