
HDFS-13258. Ozone: restructure Hdsl/Ozone code to separated maven subprojects.
Contributed by Elek Marton, Mukul Kumar Singh, Xiaoyu Yao, Ajay Kumar, Anu Engineer, Lokesh Jain, Nanda Kumar.

Anu Engineer 7 years ago
parent
commit
ce23d9adf0
100 changed files with 1,147 additions and 1,594 deletions
  1. + 18 - 1  dev-support/bin/dist-layout-stitching
  2. + 56 - 0  hadoop-cblock/pom.xml
  3. + 21 - 0  hadoop-cblock/server/dev-support/findbugsExcludeFile.xml
  4. + 169 - 0  hadoop-cblock/server/pom.xml
  5. + 1 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
  6. + 11 - 6  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
  7. + 129 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java
  8. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockServiceProtocolClientSideTranslatorPB.java
  9. + 8 - 7  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java
  10. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/package-info.java
  11. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/exception/CBlockException.java
  12. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/exception/package-info.java
  13. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java
  14. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java
  15. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java
  16. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockManagerHandler.java
  17. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java
  18. + 1 - 1  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java
  19. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java
  20. + 4 - 1  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java
  21. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/CacheModule.java
  22. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/LogicalBlock.java
  23. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java
  24. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferFlushTask.java
  25. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferManager.java
  26. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java
  27. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/DiskBlock.java
  28. + 1 - 1  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java
  29. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/package-info.java
  30. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/package-info.java
  31. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/package-info.java
  32. + 4 - 3  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java
  33. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java
  34. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java
  35. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java
  36. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeInfo.java
  37. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/package-info.java
  38. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/package-info.java
  39. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/CBlockClientProtocol.java
  40. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/CBlockServiceProtocol.java
  41. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java
  42. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/package-info.java
  43. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolPB.java
  44. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java
  45. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolPB.java
  46. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolServerSideTranslatorPB.java
  47. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/package-info.java
  48. + 4 - 4  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java
  49. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/package-info.java
  50. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/util/KeyUtil.java
  51. + 0 - 0  hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/util/package-info.java
  52. + 2 - 2  hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto
  53. + 0 - 0  hadoop-cblock/server/src/main/proto/CBlockServiceProtocol.proto
  54. + 347 - 0  hadoop-cblock/server/src/main/resources/cblock-default.xml
  55. + 3 - 3  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java
  56. + 35 - 0  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockConfigurationFields.java
  57. + 4 - 4  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java
  58. + 1 - 1  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java
  59. + 1 - 1  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java
  60. + 1 - 1  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java
  61. + 2 - 1  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java
  62. + 0 - 0  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java
  63. + 9 - 9  hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java
  64. + 0 - 0  hadoop-cblock/server/src/test/resources/dynamicprovisioner/expected1-pv.json
  65. + 0 - 0  hadoop-cblock/server/src/test/resources/dynamicprovisioner/input1-pvc.json
  66. + 42 - 0  hadoop-cblock/tools/pom.xml
  67. + 6 - 30  hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java
  68. + 0 - 0  hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/package-info.java
  69. + 0 - 0  hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java
  70. + 7 - 0  hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  71. + 9 - 94  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  72. + 24 - 25  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  73. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  74. + 38 - 0  hadoop-dist/pom.xml
  75. + 11 - 9  hadoop-dist/src/main/compose/cblock/docker-compose.yaml
  76. + 1 - 1  hadoop-dist/src/main/compose/cblock/docker-config
  77. + 9 - 7  hadoop-dist/src/main/compose/ozone/docker-compose.yaml
  78. + 1 - 0  hadoop-dist/src/main/compose/ozone/docker-config
  79. + 1 - 7  hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
  80. + 9 - 32  hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  81. + 25 - 1  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
  82. + 10 - 4  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  83. + 0 - 874  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
  84. + 1 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
  85. + 0 - 9  hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  86. + 5 - 119  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  87. + 0 - 47  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
  88. + 0 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
  89. + 0 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
  90. + 0 - 20  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  91. + 1 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  92. + 42 - 90  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  93. + 48 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeServicePlugin.java
  94. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  95. + 3 - 16  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
  96. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
  97. + 6 - 70  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/URLDispatcher.java
  98. + 6 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
  99. + 4 - 45  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
  100. + 3 - 25  hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js

+ 18 - 1
dev-support/bin/dist-layout-stitching

@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 # project.version
 VERSION=$1
 
@@ -129,6 +128,24 @@ run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}"
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${VERSION}" .
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-native-client/target/hadoop-hdfs-native-client-${VERSION}" .
+
+run copy "${ROOT}/hadoop-hdsl/common/target/hadoop-hdsl-common-${VERSION}" .
+run copy "${ROOT}/hadoop-hdsl/framework/target/hadoop-hdsl-server-framework-${VERSION}" .
+run copy "${ROOT}/hadoop-hdsl/server-scm/target/hadoop-hdsl-server-scm-${VERSION}" .
+run copy "${ROOT}/hadoop-hdsl/tools/target/hadoop-hdsl-tools-${VERSION}" .
+run copy "${ROOT}/hadoop-hdsl/container-service/target/hadoop-hdsl-container-service-${VERSION}" .
+run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${VERSION}" .
+run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${VERSION}" .
+run copy "${ROOT}/hadoop-ozone/ozone-client/target/hadoop-ozone-client-${VERSION}" .
+run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${VERSION}" .
+run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${VERSION}" .
+run copy "${ROOT}/hadoop-cblock/server/target/hadoop-cblock-server-${VERSION}" .
+run copy "${ROOT}/hadoop-cblock/tools/target/hadoop-cblock-tools-${VERSION}" .
+run cp -r "${ROOT}/hadoop-hdsl/framework/target/hadoop-hdsl-server-framework-${VERSION}/share/hadoop/hdsl/webapps/static" share/hadoop/ozone/webapps/
+run cp -r "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}/share/hadoop/hdfs/webapps/static" share/hadoop/ozone/webapps/
+run cp -r "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}/share/hadoop/hdfs/webapps/static" share/hadoop/hdsl/webapps/
+
+
 run copy "${ROOT}/hadoop-yarn-project/target/hadoop-yarn-project-${VERSION}" .
 run copy "${ROOT}/hadoop-mapreduce-project/target/hadoop-mapreduce-${VERSION}" .
 run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .

+ 56 - 0
hadoop-cblock/pom.xml

@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project-dist</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+    <relativePath>../hadoop-project-dist</relativePath>
+  </parent>
+  <artifactId>hadoop-cblock</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
+  <description>Apache Hadoop Cblock parent project</description>
+  <name>Apache Hadoop Cblock</name>
+  <packaging>pom</packaging>
+
+  <modules>
+    <module>server</module>
+    <module>tools</module>
+  </modules>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile combine.self="override"></excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>

+ 21 - 0
hadoop-cblock/server/dev-support/findbugsExcludeFile.xml

@@ -0,0 +1,21 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <Match>
+    <Package name="org.apache.hadoop.cblock.protocol.proto"/>
+  </Match>
+</FindBugsFilter>

+ 169 - 0
hadoop-cblock/server/pom.xml

@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-cblock</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-cblock-server</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
+  <description>Apache Hadoop CBlock Server</description>
+  <name>Apache Hadoop CBlock Server</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>cblock</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-server-framework</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-client</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-integration-test</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.jscsi</groupId>
+      <artifactId>target</artifactId>
+      <version>2.6.0</version>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>ch.qos.logback</groupId>
+          <artifactId>logback-classic</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>io.kubernetes</groupId>
+      <artifactId>client-java</artifactId>
+      <version>1.0.0-beta1</version>
+      <exclusions>
+        <exclusion>
+          <groupId>io.swagger</groupId>
+          <artifactId>swagger-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.github.stefanbirkner</groupId>
+          <artifactId>system-rules</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+  </dependencies>
+
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/test/resources/dynamicprovisioner/expected1-pv.json</exclude>
+            <exclude>src/test/resources/dynamicprovisioner/input1-pvc.json</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
+        <executions>
+          <execution>
+            <id>compile-protoc</id>
+            <goals>
+              <goal>protoc</goal>
+            </goals>
+            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
+              <imports>
+                <param>
+                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
+                </param>
+                <param>
+                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
+                </param>
+                <param>
+                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
+                </param>
+                <param>
+                  ${basedir}/../../hadoop-hdsl/common/src/main/proto/
+                </param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>CBlockClientServerProtocol.proto</include>
+                  <include>CBlockServiceProtocol.proto</include>
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java

@@ -23,6 +23,7 @@ import static java.lang.Thread.NORM_PRIORITY;
  * This class contains constants for configuration keys used in CBlock.
  */
 public final class CBlockConfigKeys {
+
   public static final String DFS_CBLOCK_SERVICERPC_ADDRESS_KEY =
       "dfs.cblock.servicerpc-address";
   public static final int DFS_CBLOCK_SERVICERPC_PORT_DEFAULT =

+ 11 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
 import org.apache.hadoop.cblock.protocolPB
     .CBlockServiceProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.scm.XceiverClientManager;
@@ -46,12 +45,17 @@ import org.apache.hadoop.cblock.util.KeyUtil;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.utils.LevelDBStore;
+
+import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr;
+import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
+import static org.apache.hadoop.ozone.web.util.ServerUtils
+    .updateRPCListenAddress;
 import org.iq80.leveldb.DBIterator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -148,7 +152,7 @@ public class CBlockManager implements CBlockServiceProtocol,
         ProtobufRpcEngine.class);
     // start service for client command-to-cblock server service
     InetSocketAddress serviceRpcAddr =
-        OzoneClientUtils.getCblockServiceRpcAddr(conf);
+        getCblockServiceRpcAddr(conf);
     BlockingService cblockProto =
         CBlockServiceProtocolProtos
             .CBlockServiceProtocolService
@@ -161,14 +165,14 @@ public class CBlockManager implements CBlockServiceProtocol,
         DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY,
         DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT);
     InetSocketAddress cblockServiceRpcAddress =
-        OzoneClientUtils.updateRPCListenAddress(conf,
+        updateRPCListenAddress(conf,
             DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, serviceRpcAddr, cblockService);
     LOG.info("CBlock manager listening for client commands on: {}",
         cblockServiceRpcAddress);
     // now start service for cblock client-to-cblock server communication
 
     InetSocketAddress serverRpcAddr =
-        OzoneClientUtils.getCblockServerRpcAddr(conf);
+        getCblockServerRpcAddr(conf);
     BlockingService serverProto =
         CBlockClientServerProtocolProtos
             .CBlockClientServerProtocolService
@@ -182,7 +186,7 @@ public class CBlockManager implements CBlockServiceProtocol,
         DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY,
         DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT);
     InetSocketAddress cblockServerRpcAddress =
-        OzoneClientUtils.updateRPCListenAddress(conf,
+        updateRPCListenAddress(conf,
             DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, serverRpcAddr, cblockServer);
     LOG.info("CBlock server listening for client commands on: {}",
         cblockServerRpcAddress);
@@ -389,6 +393,7 @@ public class CBlockManager implements CBlockServiceProtocol,
   public static void main(String[] args) throws Exception {
     long version = RPC.getProtocolVersion(
         StorageContainerLocationProtocolPB.class);
+    CblockUtils.activateConfigs();
     OzoneConfiguration ozoneConf = new OzoneConfiguration();
     String scmAddress = ozoneConf.get(DFS_CBLOCK_SCM_IPADDRESS_KEY,
         DFS_CBLOCK_SCM_IPADDRESS_DEFAULT);

+ 129 - 0
hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java

@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.cblock;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+
+import com.google.common.base.Optional;
+import static org.apache.hadoop.cblock.CBlockConfigKeys
+    .DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
+import static org.apache.hadoop.cblock.CBlockConfigKeys
+    .DFS_CBLOCK_JSCSI_PORT_DEFAULT;
+import static org.apache.hadoop.cblock.CBlockConfigKeys
+    .DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
+import static org.apache.hadoop.cblock.CBlockConfigKeys
+    .DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT;
+import static org.apache.hadoop.cblock.CBlockConfigKeys
+    .DFS_CBLOCK_SERVICERPC_PORT_DEFAULT;
+import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys;
+
+/**
+ * Generic stateless utility functions for CBlock components.
+ */
+public class CblockUtils {
+
+  private CblockUtils() {
+  }
+
+  /**
+   * Retrieve the socket address that is used by CBlock Service.
+   *
+   * @param conf
+   * @return Target InetSocketAddress for the CBlock Service endpoint.
+   */
+  public static InetSocketAddress getCblockServiceRpcAddr(Configuration conf) {
+    final Optional<String> host =
+        getHostNameFromConfigKeys(conf, DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
+
+    // If no port number is specified then we'll just try the defaultBindPort.
+    final Optional<Integer> port =
+        getPortNumberFromConfigKeys(conf, DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(
+        host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" + port
+            .or(DFS_CBLOCK_SERVICERPC_PORT_DEFAULT));
+  }
+
+  /**
+   * Retrieve the socket address that is used by CBlock Server.
+   *
+   * @param conf
+   * @return Target InetSocketAddress for the CBlock Server endpoint.
+   */
+  public static InetSocketAddress getCblockServerRpcAddr(Configuration conf) {
+    final Optional<String> host =
+        getHostNameFromConfigKeys(conf, DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
+
+    // If no port number is specified then we'll just try the defaultBindPort.
+    final Optional<Integer> port =
+        getPortNumberFromConfigKeys(conf, DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(
+        host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" + port
+            .or(DFS_CBLOCK_JSCSI_PORT_DEFAULT));
+  }
+
+  /**
+   * Parse size with size prefix string and return in bytes.
+   *
+   */
+  public static long parseSize(String volumeSizeArgs) throws IOException {
+    long multiplier = 1;
+
+    Pattern p = Pattern.compile("([0-9]+)([a-zA-Z]+)");
+    Matcher m = p.matcher(volumeSizeArgs);
+
+    if (!m.find()) {
+      throw new IOException("Invalid volume size args " + volumeSizeArgs);
+    }
+
+    int size = Integer.parseInt(m.group(1));
+    String s = m.group(2);
+
+    if (s.equalsIgnoreCase("MB") ||
+        s.equalsIgnoreCase("Mi")) {
+      multiplier = 1024L * 1024;
+    } else if (s.equalsIgnoreCase("GB") ||
+        s.equalsIgnoreCase("Gi")) {
+      multiplier = 1024L * 1024 * 1024;
+    } else if (s.equalsIgnoreCase("TB") ||
+        s.equalsIgnoreCase("Ti")) {
+      multiplier = 1024L * 1024 * 1024 * 1024;
+    } else {
+      throw new IOException("Invalid volume size args " + volumeSizeArgs);
+    }
+    return size * multiplier;
+  }
+
+  public static void activateConfigs(){
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+    Configuration.addDefaultResource("ozone-default.xml");
+    Configuration.addDefaultResource("ozone-site.xml");
+    Configuration.addDefaultResource("cblock-default.xml");
+    Configuration.addDefaultResource("cblock-site.xml");
+
+  }
+}
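
A quick, hypothetical usage sketch (not part of this patch) for the new CblockUtils.parseSize helper added above; the size strings below are made-up examples.

import java.io.IOException;
import org.apache.hadoop.cblock.CblockUtils;

/** Illustrative only: exercises the size-suffix parsing shown in the diff above. */
public class ParseSizeExample {
  public static void main(String[] args) throws IOException {
    // "GB" and "Gi" share the same 1024^3 multiplier in parseSize.
    long fromCli = CblockUtils.parseSize("5GB");   // 5 * 1024 * 1024 * 1024 bytes
    long fromK8s = CblockUtils.parseSize("10Gi");  // same multiplier as "GB"
    System.out.println(fromCli + " / " + fromK8s);
    // Only MB/Mi, GB/Gi and TB/Ti suffixes are accepted; anything else
    // (for example "512KB") makes parseSize throw an IOException.
  }
}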

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/client/CBlockServiceProtocolClientSideTranslatorPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockServiceProtocolClientSideTranslatorPB.java


+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java

@@ -17,20 +17,21 @@
  */
 package org.apache.hadoop.cblock.client;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.cblock.CBlockConfigKeys;
 import org.apache.hadoop.cblock.meta.VolumeInfo;
 import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
 
 /**
  * Implementation of client used by CBlock command line tool.
@@ -45,7 +46,7 @@ public class CBlockVolumeClient {
   public CBlockVolumeClient(OzoneConfiguration conf,
       InetSocketAddress serverAddress) throws IOException {
     InetSocketAddress address = serverAddress != null ? serverAddress :
-        OzoneClientUtils.getCblockServiceRpcAddr(conf);
+        getCblockServiceRpcAddr(conf);
     long version = RPC.getProtocolVersion(CBlockServiceProtocolPB.class);
     int rpcTimeout = Math.toIntExact(
         conf.getTimeDuration(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT,

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/client/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/exception/CBlockException.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/exception/CBlockException.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/exception/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/exception/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockManagerHandler.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockManagerHandler.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetMetrics.java


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.cblock.jscsiHelper;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.cblock.proto.MountVolumeResponse;
 import org.apache.hadoop.cblock.util.KeyUtil;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.scm.XceiverClientManager;
 import org.jscsi.target.Configuration;
 import org.jscsi.target.Target;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java


+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java

@@ -23,13 +23,15 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys
     .DFS_CBLOCK_ISCSI_ADVERTISED_PORT;
 import static org.apache.hadoop.cblock.CBlockConfigKeys
     .DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT;
+
+import org.apache.hadoop.cblock.CblockUtils;
 import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
 import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.scm.client.ContainerOperationClient;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,6 +61,7 @@ import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
  */
 public final class SCSITargetDaemon {
   public static void main(String[] args) throws Exception {
+    CblockUtils.activateConfigs();
     OzoneConfiguration ozoneConf = new OzoneConfiguration();
 
     RPC.setProtocolEngine(ozoneConf, CBlockClientServerProtocolPB.class,

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/CacheModule.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/CacheModule.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/LogicalBlock.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/LogicalBlock.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferFlushTask.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferFlushTask.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferManager.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/BlockBufferManager.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/DiskBlock.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/DiskBlock.java


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java

@@ -22,7 +22,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.scm.storage.ContainerProtocolCalls;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/jscsiHelper/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/package-info.java


+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java

@@ -33,11 +33,12 @@ import io.kubernetes.client.models.V1PersistentVolumeSpec;
 import io.kubernetes.client.util.Config;
 import io.kubernetes.client.util.Watch;
 import okio.Buffer;
-import org.apache.hadoop.cblock.cli.CBlockCli;
+
+import org.apache.hadoop.cblock.CblockUtils;
 import org.apache.hadoop.cblock.exception.CBlockException;
 import org.apache.hadoop.cblock.proto.MountVolumeResponse;
 import org.apache.hadoop.cblock.storage.StorageManager;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -185,7 +186,7 @@ public class DynamicProvisioner implements Runnable{
 
             String volumeName = createVolumeName(claim);
 
-            long size = CBlockCli.parseSize(
+            long size = CblockUtils.parseSize(
                 claim.getSpec().getResources().getRequests().get("storage"));
 
             createCBlock(volumeName, size);

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/meta/VolumeInfo.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeInfo.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/meta/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/proto/CBlockClientProtocol.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/CBlockClientProtocol.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/proto/CBlockServiceProtocol.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/CBlockServiceProtocol.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/proto/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolPB.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolPB.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolServerSideTranslatorPB.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockServiceProtocolServerSideTranslatorPB.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/protocolPB/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/package-info.java


+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java

@@ -25,8 +25,8 @@ import org.apache.hadoop.cblock.meta.VolumeDescriptor;
 import org.apache.hadoop.cblock.meta.VolumeInfo;
 import org.apache.hadoop.cblock.proto.MountVolumeResponse;
 import org.apache.hadoop.cblock.util.KeyUtil;
-import org.apache.hadoop.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.slf4j.Logger;
@@ -187,8 +187,8 @@ public class StorageManager {
       ContainerDescriptor container = null;
       try {
         Pipeline pipeline = storageClient.createContainer(
-            OzoneProtos.ReplicationType.STAND_ALONE,
-            OzoneProtos.ReplicationFactor.ONE,
+            HdslProtos.ReplicationType.STAND_ALONE,
+            HdslProtos.ReplicationFactor.ONE,
             KeyUtil.getContainerName(volume.getUserName(),
                 volume.getVolumeName(), containerIdx), cblockId);
 

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/util/KeyUtil.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/util/KeyUtil.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/util/package-info.java → hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/util/package-info.java


+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockClientServerProtocol.proto → hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto

@@ -27,7 +27,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.cblock;
 
-import "Ozone.proto";
+import "hdsl.proto";
 import "CBlockServiceProtocol.proto";
 /**
 * This message is sent from CBlock client side to CBlock server to
@@ -69,7 +69,7 @@ message ContainerIDProto {
     required string containerID = 1;
     required uint64 index = 2;
     // making pipeline optional to be compatible with exisiting tests
-    optional hadoop.hdfs.ozone.Pipeline pipeline = 3;
+    optional hadoop.hdsl.Pipeline pipeline = 3;
 }
 
 

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockServiceProtocol.proto → hadoop-cblock/server/src/main/proto/CBlockServiceProtocol.proto


+ 347 - 0
hadoop-cblock/server/src/main/resources/cblock-default.xml

@@ -0,0 +1,347 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Do not modify this file directly.  Instead, copy entries that you -->
+<!-- wish to modify from this file into ozone-site.xml and change them -->
+<!-- there.  If ozone-site.xml does not already exist, create it.      -->
+
+<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
+<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
+<!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
+
+<configuration>
+
+  <!--CBlock Settings-->
+  <property>
+    <name>dfs.cblock.block.buffer.flush.interval</name>
+    <value>60s</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Controls the frequency at this the local cache flushes the
+      blocks to the remote containers.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.block.buffer.size</name>
+    <value>512</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Size of the local cache for blocks. So cache size will be block
+      size multiplied by this number.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.core.min.pool.size</name>
+    <value>16</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      A minimum number of threads in the pool that cBlock cache will
+      use for the background I/O to remote containers.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.max.pool.size</name>
+    <value>256</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Maximum number of threads in the pool that cBlock cache will
+      use for background I/O to remote containers.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.keep.alive</name>
+    <value>60s</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      If the cblock cache has no I/O, then the threads in the cache
+      pool are kept idle for this amount of time before shutting down.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.leveldb.cache.size.mb</name>
+    <value>256</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      The amount of physical memory allocated to the local cache. The
+      SCSI driver will allocate this much RAM cache instances.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.max.retry</name>
+    <value>65536</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      If the local cache is enabled then, CBlock writes to the local
+      cache when I/O happens. Then the background I/O threads write this
+      block to the remote containers. This value controls how many times the
+      background thread should attempt to do I/O to the remote containers
+      before giving up.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.queue.size.in.kb</name>
+    <value>256</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Size of the in memory cache queue, that is flushed to local
+      disk.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.cache.thread.priority</name>
+    <value>5</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Priority of cache flusher thread, affecting the relative performance of
+      write and read. Supported values are 1, 5, 10.
+      Use 10 for high priority and 1 for low priority.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.container.size.gb</name>
+    <value>5</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The size of ozone container in the number of GBs. Note that
+      this is not setting container size for ozone. This setting is
+      instructing CBlock to manage containers at a standard size.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.disk.cache.path</name>
+    <value>${hadoop.tmp.dir}/cblockCacheDB</value>
+    <tag>CBLOCK, REQUIRED</tag>
+    <description>
+      The default path for the cblock local cache. If the cblock
+      local cache is enabled, then it must be set to a valid path. This cache
+      *should* be mapped to the fastest disk on a given machine, For example,
+      an SSD drive would be a good idea. Currently, all mounted disk on a
+      data node is mapped to a single path, so having a large number of IOPS
+      is essential.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.jscsi-address</name>
+    <value/>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The address that cblock will be bind to, should be a host:port
+      format, This setting is required for cblock server to start.
+      This address to be used by jscsi to mount volume.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.jscsi.cblock.server.address</name>
+    <value>127.0.0.1</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The address local jscsi server will use to talk to cblock manager.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.jscsi.port</name>
+    <value>9811</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The port on CBlockManager node for jSCSI to talk to.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.jscsi.rpc-bind-host</name>
+    <value>0.0.0.0</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The actual address the cblock jscsi rpc server will bind to. If
+      this optional address is set, it overrides only the hostname portion of
+      dfs.cblock.jscsi-address.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.jscsi.server.address</name>
+    <value>0.0.0.0</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The address that jscsi server will be running, it is nice have one
+      local jscsi server for each client(Linux JSCSI client) that tries to
+      mount cblock.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.manager.pool.size</name>
+    <value>16</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Number of active threads that cblock manager will use for container
+      operations. The maximum number of the threads are limited to the
+      processor count * 2.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.rpc.timeout</name>
+    <value>300s</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      RPC timeout used for cblock CLI operations. When you
+      create very large disks, like 5TB, etc. The number of containers
+      allocated in the system is huge. It is will 5TB/5GB, which is 1000
+      containers. The client CLI might timeout even though the cblock manager
+      creates the specified disk. This value allows the user to wait for a
+      longer period.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.scm.ipaddress</name>
+    <value>127.0.0.1</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      IP address used by cblock to connect to SCM.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.scm.port</name>
+    <value>9860</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      Port used by cblock to connect to SCM.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.service.handler.count</name>
+    <value>10</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      Default number of handlers for CBlock service rpc.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.service.leveldb.path</name>
+    <value>${hadoop.tmp.dir}/cblock_server.dat</value>
+    <tag>CBLOCK, REQUIRED</tag>
+    <description>
+      Default path for the cblock meta data store.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.service.rpc-bind-host</name>
+    <value>0.0.0.0</value>
+    <tag>CBLOCK, MANAGEMENT</tag>
+    <description>
+      The actual address the cblock service RPC server will bind to.
+      If the optional address is set, it overrides only the hostname portion of
+      dfs.cblock.servicerpc-address.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.servicerpc-address</name>
+    <value/>
+    <tag>CBLOCK, MANAGEMENT, REQUIRED</tag>
+    <description>
+      The address that cblock will be bind to, should be a host:port
+      format, this setting is required for cblock server to start.
+      This address is used for cblock management operations like create, delete,
+      info and list volumes
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.short.circuit.io</name>
+    <value>false</value>
+    <tag>CBLOCK, PERFORMANCE</tag>
+    <description>
+      Enables use of the local cache in cblock. Enabling this allows
+      I/O against the local cache and background threads do actual I/O against
+      the
+      containers.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cblock.trace.io</name>
+    <value>false</value>
+    <tag>CBLOCK, DEBUG</tag>
+    <description>Default flag for enabling trace I/O. Trace I/O logs all I/O
+      with hashes of data. This is useful for detecting things like data
+      corruption.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.iscsi.advertised.ip</name>
+    <value>0.0.0.0</value>
+    <tag>CBLOCK</tag>
+    <description>
+      IP address returned during the iscsi discovery.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.iscsi.advertised.port</name>
+    <value>3260</value>
+    <tag>CBLOCK</tag>
+    <description>
+      TCP port returned during the iscsi discovery.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.kubernetes.dynamic-provisioner.enabled</name>
+    <value>false</value>
+    <tag>CBLOCK, KUBERNETES</tag>
+    <description>Flag to enable automatic creation of cblocks and
+      kubernetes PersistentVolumes in a kubernetes environment.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.kubernetes.cblock-user</name>
+    <value>iqn.2001-04.org.apache.hadoop</value>
+    <tag>CBLOCK, KUBERNETES</tag>
+    <description>CBlock user to use for the dynamic provisioner.
+      This user will own all of the auto-created cblocks.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.kubernetes.configfile</name>
+    <value></value>
+    <tag>CBLOCK, KUBERNETES</tag>
+    <description>Location of the kubernetes configuration file
+      to access the kubernetes cluster. Not required inside a pod,
+      as the default service account will be used if this value is
+      empty.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.iscsi.advertised.ip</name>
+    <value></value>
+    <tag>CBLOCK, KUBERNETES</tag>
+    <description>IP where the cblock target server is available
+      from the kubernetes nodes. Usually it is a cluster IP address
+      defined by a deployed Service.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cblock.iscsi.advertised.port</name>
+    <value>3260</value>
+    <tag>CBLOCK, KUBERNETES</tag>
+    <description>Port where the cblock target server is available
+      from the kubernetes nodes. Could be different from the
+      listening port if jscsi is behind a Service.
+    </description>
+  </property>
+</configuration>
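
These cblock settings are plain Hadoop configuration entries, so server code and the CLI read them through the usual Configuration accessors. A minimal sketch of reading a few of them, assuming org.apache.hadoop.hdsl.conf.OzoneConfiguration (imported throughout this patch) behaves like a regular Configuration; the literal key strings below stand in for the constants defined in CBlockConfigKeys.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;

public final class CBlockConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // "300s"-style values such as dfs.cblock.rpc.timeout are parsed by getTimeDuration.
    long rpcTimeoutMs = conf.getTimeDuration(
        "dfs.cblock.rpc.timeout", 300_000, TimeUnit.MILLISECONDS);
    int poolSize = conf.getInt("dfs.cblock.manager.pool.size", 16);
    boolean shortCircuitIo = conf.getBoolean("dfs.cblock.short.circuit.io", false);
    System.out.println(rpcTimeoutMs + " " + poolSize + " " + shortCircuitIo);
  }
}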

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java

@@ -23,16 +23,16 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
 import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
 import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;

+ 35 - 0
hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockConfigurationFields.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.cblock;
+
+import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+
+/**
+ * Tests if the configuration constants are documented in cblock-default.xml.
+ */
+public class TestCBlockConfigurationFields extends TestConfigurationFieldsBase {
+
+  @Override
+  public void initializeMemberVariables() {
+    xmlFilename = new String("cblock-default.xml");
+    configurationClasses =
+        new Class[] {CBlockConfigKeys.class};
+    errorIfMissingConfigProps = true;
+    errorIfMissingXmlProps = true;
+  }
+}
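
TestConfigurationFieldsBase cross-checks the constants declared in CBlockConfigKeys against the entries of cblock-default.xml in both directions, so a new setting has to be added to both places. A sketch of what that pairing looks like; the key, default and XML entry below are hypothetical and not part of this patch.

/** Hypothetical example: a new cblock setting and its documentation entry. */
public final class ExampleCBlockSetting {
  // 1) Constant and default value that would live in CBlockConfigKeys:
  public static final String DFS_CBLOCK_EXAMPLE_KEY = "dfs.cblock.example.setting";
  public static final int DFS_CBLOCK_EXAMPLE_DEFAULT = 42;

  private ExampleCBlockSetting() {
  }

  // 2) Matching entry that would be added to cblock-default.xml so that
  //    TestCBlockConfigurationFields keeps passing:
  //    <property>
  //      <name>dfs.cblock.example.setting</name>
  //      <value>42</value>
  //      <tag>CBLOCK</tag>
  //      <description>Hypothetical example setting.</description>
  //    </property>
}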

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java

@@ -24,14 +24,14 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
 import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
 import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
 import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.LifeCycleState;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationFactor;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationType;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
 import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.container.common.helpers.PipelineChannel;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java

@@ -19,9 +19,9 @@ package org.apache.hadoop.cblock;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.cblock.meta.VolumeInfo;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.cblock.util.MockStorageClient;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java

@@ -18,10 +18,10 @@
 package org.apache.hadoop.cblock;
 
 import org.apache.hadoop.cblock.meta.VolumeDescriptor;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.cblock.util.MockStorageClient;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.junit.Test;
 
 import java.io.File;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java

@@ -25,10 +25,10 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
 import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
 import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
 import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.scm.XceiverClientSpi;

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java

@@ -23,13 +23,14 @@ import io.kubernetes.client.models.V1PersistentVolume;
 import io.kubernetes.client.models.V1PersistentVolumeClaim;
 import static org.apache.hadoop.cblock.CBlockConfigKeys
     .DFS_CBLOCK_ISCSI_ADVERTISED_IP;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.nio.file.Files;
 import java.nio.file.Paths;
 
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+
 /**
  * Test the resource generation of Dynamic Provisioner.
  */

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java


+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java → hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java

@@ -18,9 +18,9 @@
 package org.apache.hadoop.cblock.util;
 
 import org.apache.hadoop.cblock.meta.ContainerDescriptor;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
+import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -88,7 +88,7 @@ public class MockStorageClient implements ScmClient {
     ContainerInfo container = new ContainerInfo.Builder()
         .setContainerName(containerDescriptor.getContainerID())
         .setPipeline(containerDescriptor.getPipeline())
-        .setState(OzoneProtos.LifeCycleState.ALLOCATED)
+        .setState(HdslProtos.LifeCycleState.ALLOCATED)
         .build();
     containerList.add(container);
     return containerList;
@@ -134,8 +134,8 @@ public class MockStorageClient implements ScmClient {
   }
 
   @Override
-  public Pipeline createContainer(OzoneProtos.ReplicationType type,
-      OzoneProtos.ReplicationFactor replicationFactor, String containerId,
+  public Pipeline createContainer(HdslProtos.ReplicationType type,
+      HdslProtos.ReplicationFactor replicationFactor, String containerId,
       String owner) throws IOException {
     int contId = currentContainerId.getAndIncrement();
     ContainerLookUpService.addContainer(Long.toString(contId));
@@ -153,8 +153,8 @@ public class MockStorageClient implements ScmClient {
    * @throws IOException
    */
   @Override
-  public OzoneProtos.NodePool queryNode(EnumSet<OzoneProtos.NodeState>
-      nodeStatuses, OzoneProtos.QueryScope queryScope, String poolName)
+  public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState>
+      nodeStatuses, HdslProtos.QueryScope queryScope, String poolName)
       throws IOException {
     return null;
   }
@@ -168,8 +168,8 @@ public class MockStorageClient implements ScmClient {
    * @throws IOException
    */
   @Override
-  public Pipeline createReplicationPipeline(OzoneProtos.ReplicationType type,
-      OzoneProtos.ReplicationFactor factor, OzoneProtos.NodePool nodePool)
+  public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type,
+      HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool)
       throws IOException {
     return null;
   }

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/expected1-pv.json → hadoop-cblock/server/src/test/resources/dynamicprovisioner/expected1-pv.json


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/input1-pvc.json → hadoop-cblock/server/src/test/resources/dynamicprovisioner/input1-pvc.json


+ 42 - 0
hadoop-cblock/tools/pom.xml

@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-cblock</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-cblock-tools</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
+  <description>Apache Hadoop CBlock Tools</description>
+  <name>Apache Hadoop CBlock Tools</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>cblock</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cblock-server</artifactId>
+    </dependency>
+  </dependencies>
+</project>

+ 6 - 30
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java → hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java

@@ -24,15 +24,18 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+
+import org.apache.hadoop.cblock.CblockUtils;
 import org.apache.hadoop.cblock.client.CBlockVolumeClient;
 import org.apache.hadoop.cblock.meta.VolumeInfo;
 import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,8 +44,6 @@ import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 /**
  * The command line tool class.
@@ -194,6 +195,7 @@ public class CBlockCli extends Configured implements Tool {
   }
 
   public static void main(String[] argv) throws Exception {
+    CblockUtils.activateConfigs();
     OzoneConfiguration cblockConf = new OzoneConfiguration();
     RPC.setProtocolEngine(cblockConf, CBlockServiceProtocolPB.class,
         ProtobufRpcEngine.class);
@@ -208,38 +210,12 @@ public class CBlockCli extends Configured implements Tool {
     System.exit(res);
   }
 
-  public static long parseSize(String volumeSizeArgs) throws IOException {
-    long multiplier = 1;
-
-    Pattern p = Pattern.compile("([0-9]+)([a-zA-Z]+)");
-    Matcher m = p.matcher(volumeSizeArgs);
-
-    if (!m.find()) {
-      throw new IOException("Invalid volume size args " + volumeSizeArgs);
-    }
 
-    int size = Integer.parseInt(m.group(1));
-    String s = m.group(2);
-
-    if (s.equalsIgnoreCase("MB") ||
-        s.equalsIgnoreCase("Mi")) {
-      multiplier = 1024L * 1024;
-    } else if (s.equalsIgnoreCase("GB") ||
-        s.equalsIgnoreCase("Gi")) {
-      multiplier = 1024L * 1024 * 1024;
-    } else if (s.equalsIgnoreCase("TB") ||
-        s.equalsIgnoreCase("Ti")) {
-      multiplier = 1024L * 1024 * 1024 * 1024;
-    } else {
-      throw new IOException("Invalid volume size args " + volumeSizeArgs);
-    }
-    return size * multiplier;
-  }
 
   private void createVolume(String[] createArgs) throws IOException {
     String userName = createArgs[0];
     String volumeName = createArgs[1];
-    long volumeSize = parseSize(createArgs[2]);
+    long volumeSize = CblockUtils.parseSize(createArgs[2]);
     int blockSize = Integer.parseInt(createArgs[3])*1024;
     localProxy.createVolume(userName, volumeName, volumeSize, blockSize);
   }
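
The size-parsing helper removed above moves into CblockUtils, which main() now also uses to activate the cblock configuration resources. A small usage sketch, assuming the moved CblockUtils.parseSize keeps the contract of the deleted method (a number followed by MB/Mi, GB/Gi or TB/Ti, returned as bytes, IOException otherwise).

import org.apache.hadoop.cblock.CblockUtils;

public final class ParseSizeExample {
  public static void main(String[] args) throws Exception {
    // Registers the cblock configuration resources before parsing,
    // as CBlockCli.main does (assumed behaviour of activateConfigs).
    CblockUtils.activateConfigs();
    long bytes = CblockUtils.parseSize("5GB");   // 5 * 1024^3 = 5368709120 bytes
    System.out.println(bytes);
  }
}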

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/package-info.java → hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/package-info.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/TestCBlockCLI.java → hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java


+ 7 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -596,6 +596,13 @@ function hadoop_bootstrap
   YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
   MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
   MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
+  HDSL_DIR=${HDSL_DIR:-"share/hadoop/hdsl"}
+  HDSL_LIB_JARS_DIR=${HDSL_LIB_JARS_DIR:-"share/hadoop/hdsl/lib"}
+  OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
+  OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
+  CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"}
+  CBLOCK_LIB_JARS_DIR=${CBLOCK_LIB_JARS_DIR:-"share/hadoop/cblock/lib"}
+
   HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
   HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

+ 9 - 94
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -17,15 +17,9 @@
  */
 package org.apache.hadoop.conf;
 
-import com.google.gson.Gson;
 import java.io.IOException;
 import java.io.Writer;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
@@ -38,8 +32,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer2;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A servlet to print out the running configuration data.
@@ -51,12 +43,6 @@ public class ConfServlet extends HttpServlet {
 
   protected static final String FORMAT_JSON = "json";
   protected static final String FORMAT_XML = "xml";
-  private static final String COMMAND = "cmd";
-  private static final Logger LOG = LoggerFactory.getLogger(ConfServlet.class);
-  private transient static final Configuration OZONE_CONFIG = new
-      OzoneConfiguration();
-  private transient Map<String, OzoneConfiguration.Property> propertyMap = null;
-
 
   /**
    * Return the Configuration of the daemon hosting this servlet.
@@ -79,27 +65,21 @@ public class ConfServlet extends HttpServlet {
     final ServletContext servletContext = getServletContext();
     if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
         !HttpServer2.isInstrumentationAccessAllowed(servletContext,
-            request, response)) {
+                                                   request, response)) {
       return;
     }
 
     String format = parseAcceptHeader(request);
-    String cmd = request.getParameter(COMMAND);
-    Writer out = response.getWriter();
+    if (FORMAT_XML.equals(format)) {
+      response.setContentType("text/xml; charset=utf-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=utf-8");
+    }
 
+    String name = request.getParameter("name");
+    Writer out = response.getWriter();
     try {
-      if (cmd == null) {
-        if (FORMAT_XML.equals(format)) {
-          response.setContentType("text/xml; charset=utf-8");
-        } else if (FORMAT_JSON.equals(format)) {
-          response.setContentType("application/json; charset=utf-8");
-        }
-
-        String name = request.getParameter("name");
-        writeResponse(getConfFromContext(), out, format, name);
-      } else {
-        processConfigTagRequest(request, out);
-      }
+      writeResponse(getConfFromContext(), out, format, name);
     } catch (BadFormatException bfe) {
       response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
     } catch (IllegalArgumentException iae) {
@@ -108,67 +88,6 @@ public class ConfServlet extends HttpServlet {
     out.close();
   }
 
-  private void processConfigTagRequest(HttpServletRequest request,
-      Writer out) throws IOException {
-    String cmd = request.getParameter(COMMAND);
-    Gson gson = new Gson();
-    Configuration config = getOzoneConfig();
-
-    config.get("ozone.enabled");
-    switch (cmd) {
-    case "getPropertyByTag":
-      String tags = request.getParameter("tags");
-      String tagGroup = request.getParameter("group");
-      LOG.debug("Getting all properties for tags:" + tags + " group:" +
-          tagGroup);
-      List<String> tagList = new ArrayList<>();
-      for (String tag : tags.split(",")) {
-        if (config.isPropertyTag(tag)) {
-          tagList.add(tag);
-        }
-      }
-
-      Properties properties = config.getAllPropertiesByTags(tagList);
-      if (propertyMap == null) {
-        loadDescriptions();
-      }
-
-      List<OzoneConfiguration.Property> filteredProperties = new ArrayList<>();
-
-      properties.stringPropertyNames().stream().forEach(key -> {
-        if (config.get(key) != null) {
-          propertyMap.get(key).setValue(config.get(key));
-          filteredProperties.add(propertyMap.get(key));
-        }
-      });
-      out.write(gson.toJsonTree(filteredProperties).toString());
-      break;
-    default:
-      throw new IllegalArgumentException(cmd + " is not a valid command.");
-    }
-
-  }
-
-  private void loadDescriptions() {
-    OzoneConfiguration config = (OzoneConfiguration) getOzoneConfig();
-    List<OzoneConfiguration.Property> propList = null;
-    propertyMap = new HashMap<>();
-    try {
-      propList = config.readPropertyFromXml(config.getResource("ozone-site"
-          + ".xml"));
-      propList.stream().map(p -> propertyMap.put(p.getName(), p));
-      propList = config.readPropertyFromXml(config.getResource("ozone-default"
-          + ".xml"));
-      propList.stream().forEach(p -> {
-        if (!propertyMap.containsKey(p.getName().trim())) {
-          propertyMap.put(p.getName().trim(), p);
-        }
-      });
-    } catch (Exception e) {
-      LOG.error("Error while reading description from xml files", e);
-    }
-  }
-
   @VisibleForTesting
   static String parseAcceptHeader(HttpServletRequest request) {
     String format = request.getHeader(HttpHeaders.ACCEPT);
@@ -204,8 +123,4 @@ public class ConfServlet extends HttpServlet {
     }
   }
 
-  private static Configuration getOzoneConfig() {
-    return OZONE_CONFIG;
-  }
-
 }

+ 24 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -18,24 +18,6 @@
 
 package org.apache.hadoop.fs;
 
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.map.CaseInsensitiveMap;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
@@ -71,8 +53,26 @@ import java.util.zip.CheckedOutputStream;
 import java.util.zip.GZIPInputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
-import java.util.zip.ZipOutputStream;
 import java.util.zip.ZipInputStream;
+import java.util.zip.ZipOutputStream;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.CaseInsensitiveMap;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of file-processing util methods.
@@ -91,9 +91,9 @@ public class FileUtil {
   public static final int SYMLINK_NO_PRIVILEGE = 2;
 
   /**
-   * Buffer size used while zipping and unzipping zip-ed archives.
+   * Buffer size for copying the content of a compressed file to a new file.
    */
-  private static final int BUFFER_SIZE = 8192;
+  private static final int BUFFER_SIZE = 8_192;
 
   /**
    * convert an array of FileStatus to an array of Path
@@ -613,7 +613,6 @@ public class FileUtil {
   }
 
   /**
-<<<<<<< HEAD
    * creates zip archieve of the source dir and writes a zip file.
    *
    * @param sourceDir - The directory to zip.
@@ -673,7 +672,7 @@ public class FileUtil {
   }
 
   /**
-   * Given a File input it will unzip the file in a the unzip directory
+   * Given a stream input, it will unzip it into the unzip directory
    * passed as the second parameter
    * @param inputStream The zip file as input
    * @param toDir The unzip directory where to unzip the zip file.
@@ -731,12 +730,12 @@ public class FileUtil {
             if (!file.getParentFile().mkdirs()) {
               if (!file.getParentFile().isDirectory()) {
                 throw new IOException("Mkdirs failed to create " +
-                    file.getParentFile().toString());
+                                      file.getParentFile().toString());
               }
             }
             OutputStream out = new FileOutputStream(file);
             try {
-              byte[] buffer = new byte[BUFFER_SIZE];
+              byte[] buffer = new byte[8192];
               int i;
               while ((i = in.read(buffer)) != -1) {
                 out.write(buffer, 0, i);

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -814,7 +814,7 @@ public class RPC {
 
     static String serverNameFromClass(Class<?> clazz) {
       //The basic idea here is to handle names like
-      //org.apache.hadoop.ozone.protocol.proto.
+      //org.apache.hadoop.hdsl.protocol.proto.
       //
       // StorageDatanodeProtocolProtos$StorageContainerDatanodeProtocolService$2
       //where the getSimpleName is also empty

+ 38 - 0
hadoop-dist/pom.xml

@@ -68,6 +68,44 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-server-scm</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cblock-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-container-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-objectstore-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cblock-tools</artifactId>
+    </dependency>
+
   </dependencies>
 
   <build>

+ 11 - 9
hadoop-dist/src/main/compose/cblock/docker-compose.yaml

@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,36 +29,38 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/hdfs","datanode"]
+      command: ["/opt/hadoop/bin/oz","datanode"]
+      env_file:
+         - ./docker-config
    jscsi:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       ports:
         - 3260:3260
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","jscsi"]
+      command: ["/opt/hadoop/bin/oz","jscsi"]
    cblock:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","cblockserver"]
+      command: ["/opt/hadoop/bin/oz","cblockserver"]
    scm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
          - 9876:9876
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","scm"]
+      command: ["/opt/hadoop/bin/oz","scm"]
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION

+ 1 - 1
hadoop-dist/src/main/compose/cblock/docker-config

@@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
 OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
 OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true

+ 9 - 7
hadoop-dist/src/main/compose/ozone/docker-compose.yaml

@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,14 +29,16 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/hdfs","datanode"]
+      command: ["/opt/hadoop/bin/oz","datanode"]
+      env_file:
+        - ./docker-config
    ksm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -45,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","ksm"]
+      command: ["/opt/hadoop/bin/oz","ksm"]
    scm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -56,4 +58,4 @@ services:
           - ./docker-config
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/hdfs","scm"]
+      command: ["/opt/hadoop/bin/oz","scm"]

+ 1 - 0
hadoop-dist/src/main/compose/ozone/docker-config

@@ -27,6 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

+ 1 - 7
hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml

@@ -63,13 +63,6 @@
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
 
-  <Match>
-    <Package name="org.apache.hadoop.ozone.protocol.proto" />
-  </Match>
-  <Match>
-    <Package name="org.apache.hadoop.hdfs.ozone.protocol.proto" />
-  </Match>
-
   <!-- BlockLocations are user-facing, but LocatedBlocks are not. -->
   <Match>
     <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
@@ -98,4 +91,5 @@
     <Method name="getSymlinkInBytes" />
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
+
 </FindBugsFilter>

+ 9 - 32
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -63,6 +63,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-all</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.mock-server</groupId>
       <artifactId>mockserver-netty</artifactId>
@@ -100,37 +105,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.core</groupId>
-      <artifactId>jackson-annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-all</artifactId>
-    </dependency>
+      <dependency>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+      </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
-
-    <dependency>
-      <artifactId>ratis-server</artifactId>
-      <groupId>org.apache.ratis</groupId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-netty</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-grpc</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
   </dependencies>
 
   <build>
@@ -177,11 +159,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>inotify.proto</include>
                   <include>erasurecoding.proto</include>
                   <include>ReconfigurationProtocol.proto</include>
-                  <include>StorageContainerLocationProtocol.proto</include>
-                  <include>DatanodeContainerProtocol.proto</include>
-                  <include>Ozone.proto</include>
-                  <include>KeySpaceManagerProtocol.proto</include>
-                  <include>ScmBlockLocationProtocol.proto</include>
                 </includes>
               </source>
             </configuration>

+ 25 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -54,6 +54,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private String xferAddr;
   private int containerPort; // container Stand_alone Rpc port.
   private int ratisPort; // Container Ratis RPC Port.
+  private int ozoneRestPort;
 
   /**
    * UUID identifying a given datanode. For upgraded Datanodes this is the
@@ -75,6 +76,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         from.getInfoPort(),
         from.getInfoSecurePort(),
         from.getIpcPort());
+    this.ozoneRestPort = from.getOzoneRestPort();
     this.peerHostName = from.getPeerHostName();
   }
 
@@ -267,6 +269,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
     infoPort = nodeReg.getInfoPort();
     infoSecurePort = nodeReg.getInfoSecurePort();
     ipcPort = nodeReg.getIpcPort();
+    ratisPort = nodeReg.getRatisPort();
+    ozoneRestPort = nodeReg.getOzoneRestPort();
   }
 
   /**
@@ -312,6 +316,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
     this.ratisPort = ratisPort;
   }
 
+  /**
+   * Ozone rest port.
+   *
+   * @return rest port.
+   */
+  public int getOzoneRestPort() {
+    return ozoneRestPort;
+  }
+
+  /**
+   * Set the ozone rest port.
+   *
+   * @param ozoneRestPort
+   */
+  public void setOzoneRestPort(int ozoneRestPort) {
+    this.ozoneRestPort = ozoneRestPort;
+  }
+
   /**
    * Returns a DataNode ID from the protocol buffers.
    *
@@ -326,6 +348,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         datanodeIDProto.getInfoSecurePort(), datanodeIDProto.getIpcPort());
     id.setContainerPort(datanodeIDProto.getContainerPort());
     id.setRatisPort(datanodeIDProto.getRatisPort());
+    id.setOzoneRestPort(datanodeIDProto.getOzoneRestPort());
     return id;
   }
 
@@ -345,6 +368,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         .setIpcPort(this.getIpcPort())
         .setContainerPort(this.getContainerPort())
         .setRatisPort(this.getRatisPort())
+        .setOzoneRestPort(this.getOzoneRestPort())
         .build();
   }
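
The ozoneRestPort field added above travels with the existing container and Ratis ports and survives the protobuf round trip. A minimal sketch of the accessors, using the seven-argument constructor that the PBHelperClient change below relies on; all values are placeholders.

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public final class DatanodeIdPortsExample {
  public static void main(String[] args) {
    DatanodeID id = new DatanodeID("127.0.0.1", "localhost",
        "2bba4707-0000-0000-0000-000000000000", 9866, 9864, 9865, 9867);
    id.setContainerPort(9876);     // container stand-alone RPC port
    id.setRatisPort(9858);         // container Ratis RPC port
    id.setOzoneRestPort(9880);     // new in this patch
    System.out.println(id.getXferAddr() + " rest=" + id.getOzoneRestPort());
  }
}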
 

+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -292,7 +292,9 @@ public class PBHelperClient {
             dn.getDatanodeUuid() : "")
         .setInfoPort(dn.getInfoPort())
         .setInfoSecurePort(dn.getInfoSecurePort())
-        .setIpcPort(dn.getIpcPort()).build();
+        .setIpcPort(dn.getIpcPort())
+        .setOzoneRestPort(dn.getOzoneRestPort())
+        .build();
   }
 
   public static DatanodeInfoProto.AdminState convert(
@@ -742,9 +744,13 @@ public class PBHelperClient {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getIpAddr(), dn.getHostName(),
-        dn.getDatanodeUuid(), dn.getXferPort(), dn.getInfoPort(),
-        dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0, dn.getIpcPort());
+    DatanodeID datanodeID =
+        new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
+            dn.getXferPort(), dn.getInfoPort(),
+            dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0,
+            dn.getIpcPort());
+    datanodeID.setOzoneRestPort(dn.getOzoneRestPort());
+    return datanodeID;
   }
 
   public static AdminStates convert(AdminState adminState) {

+ 0 - 874
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java

@@ -1,874 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import com.google.common.base.Optional;
-
-import com.google.common.base.Preconditions;
-import com.google.common.net.HostAndPort;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.scm.ScmConfigKeys;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.cblock.CBlockConfigKeys
-    .DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
-import static org.apache.hadoop.cblock.CBlockConfigKeys
-    .DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT;
-import static org.apache.hadoop.cblock.CBlockConfigKeys
-    .DFS_CBLOCK_SERVICERPC_PORT_DEFAULT;
-import static org.apache.hadoop.cblock.CBlockConfigKeys
-    .DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
-import static org.apache.hadoop.cblock.CBlockConfigKeys
-    .DFS_CBLOCK_JSCSI_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_BIND_HOST_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_PORT_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
-
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-
-/**
- * Utility methods for Ozone and Container Clients.
- *
- * The methods to retrieve SCM service endpoints assume there is a single
- * SCM service instance. This will change when we switch to replicated service
- * instances for redundancy.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class OzoneClientUtils {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      OzoneClientUtils.class);
-  private static final int NO_PORT = -1;
-
-  /**
-   * Date format that used in ozone. Here the format is thread safe to use.
-   */
-  private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
-      ThreadLocal.withInitial(() -> {
-        DateTimeFormatter format =
-            DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
-        return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
-      });
-
-  /**
-   * The service ID of the solitary Ozone SCM service.
-   */
-  public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
-  public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
-      "OzoneScmServiceInstance";
-
-  private OzoneClientUtils() {
-    // Never constructed
-  }
-
-  /**
-   * Retrieve the socket addresses of all storage container managers.
-   *
-   * @param conf
-   * @return A collection of SCM addresses
-   * @throws IllegalArgumentException If the configuration is invalid
-   */
-  public static Collection<InetSocketAddress> getSCMAddresses(
-      Configuration conf) throws IllegalArgumentException {
-    Collection<InetSocketAddress> addresses =
-        new HashSet<InetSocketAddress>();
-    Collection<String> names =
-        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
-    if (names == null || names.isEmpty()) {
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
-          + " need to be a set of valid DNS names or IP addresses."
-          + " Null or empty address list found.");
-    }
-
-    final com.google.common.base.Optional<Integer>
-        defaultPort =  com.google.common.base.Optional.of(ScmConfigKeys
-        .OZONE_SCM_DEFAULT_PORT);
-    for (String address : names) {
-      com.google.common.base.Optional<String> hostname =
-          OzoneClientUtils.getHostName(address);
-      if (!hostname.isPresent()) {
-        throw new IllegalArgumentException("Invalid hostname for SCM: "
-            + hostname);
-      }
-      com.google.common.base.Optional<Integer> port =
-          OzoneClientUtils.getHostPort(address);
-      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
-          port.or(defaultPort.get()));
-      addresses.add(addr);
-    }
-    return addresses;
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration for details" +
-          " on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" +
-        port.or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM for block service. If
-   * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   * @throws IllegalArgumentException if configuration is not defined.
-   */
-  public static InetSocketAddress getScmAddressForBlockClients(
-      Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      host = getHostNameFromConfigKeys(conf,
-              ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-      if (!host.isPresent()) {
-        throw new IllegalArgumentException(
-                ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY +
-                        " must be defined. See" +
-                        " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-                        " for details on configuring Ozone.");
-      }
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" +
-        port.or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmAddressForDataNodes(
-      Configuration conf) {
-    // We try the following settings in decreasing priority to retrieve the
-    // target host.
-    // - OZONE_SCM_DATANODE_ADDRESS_KEY
-    // - OZONE_SCM_CLIENT_ADDRESS_KEY
-    //
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration for details" +
-          " on configuring Ozone.");
-    }
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    InetSocketAddress addr = NetUtils.createSocketAddr(host.get() + ":" +
-        port.or(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    return addr;
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT) + ":" +
-            port.or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM Block service.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   */
-  public static InetSocketAddress getScmBlockClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT) +
-            ":" + port.or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmDataNodeBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" +
-            port.or(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-  }
-
-
-  /**
-   * Retrieve the socket address that is used by KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(OZONE_KSM_BIND_HOST_DEFAULT) + ":" +
-            port.or(OZONE_KSM_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the KSM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddressForClients(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          OZONE_KSM_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.get() + ":" + port.or(OZONE_KSM_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that is used by CBlock Service.
-   * @param conf
-   * @return Target InetSocketAddress for the CBlock Service endpoint.
-   */
-  public static InetSocketAddress getCblockServiceRpcAddr(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" +
-            port.or(DFS_CBLOCK_SERVICERPC_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that is used by CBlock Server.
-   * @param conf
-   * @return Target InetSocketAddress for the CBlock Server endpoint.
-   */
-  public static InetSocketAddress getCblockServerRpcAddr(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" +
-            port.or(DFS_CBLOCK_JSCSI_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the hostname, trying the supplied config keys in order.
-   * Each config value may be absent, and if present must be in the
-   * host:port format (the :port part is optional).
-   *
-   * @param conf  - Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first hostname component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
-      String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<String> hostName = getHostName(value);
-      if (hostName.isPresent()) {
-        return hostName;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Gets the hostname, or indicates that it is absent.
-   * @param value host or host:port
-   * @return hostname
-   */
-  public static Optional<String> getHostName(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    return Optional.of(HostAndPort.fromString(value).getHostText());
-  }
-
-  /**
-   * Gets the port number if one is present, or absent otherwise.
-   * @param value  String in host:port format.
-   * @return Port
-   */
-  public static Optional<Integer> getHostPort(String value) {
-    if((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
-    if (port == NO_PORT) {
-      return Optional.absent();
-    } else {
-      return Optional.of(port);
-    }
-  }
-
-  /**
-   * Returns the cache value to be used for list calls.
-   * @param conf Configuration object
-   * @return list cache size
-   */
-  public static int getListCacheSize(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
-        OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
-  }
-
-  /**
-   * Retrieve the port number, trying the supplied config keys in order.
-   * Each config value may be absent, and if present must be in the
-   * host:port format (the :port part is optional).
-   *
-   * @param conf Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first port number component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<Integer> getPortNumberFromConfigKeys(
-      Configuration conf, String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<Integer> hostPort = getHostPort(value);
-      if (hostPort.isPresent()) {
-        return hostPort;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Returns the Ozone SCM service addresses. This method is used
-   * by the DataNodes to determine the service instances to connect to.
-   *
-   * @param conf
-   * @return list of SCM service addresses.
-   */
-  public static Map<String, ? extends Map<String, InetSocketAddress>>
-      getScmServiceRpcAddresses(Configuration conf) {
-    final Map<String, InetSocketAddress> serviceInstances = new HashMap<>();
-    serviceInstances.put(OZONE_SCM_SERVICE_INSTANCE_ID,
-        getScmAddressForDataNodes(conf));
-
-    final Map<String, Map<String, InetSocketAddress>> services =
-        new HashMap<>();
-    services.put(OZONE_SCM_SERVICE_ID, serviceInstances);
-    return services;
-  }
-
-  /**
-   * Checks that a given value is within a range.
-   *
-   * For example, sanitizeUserArgs(17, 3, 5, 10)
-   * ensures that 17 is at least 3 * 5 and at most 3 * 10.
-   *
-   * @param valueTocheck  - value to check
-   * @param baseValue     - the base value that is being used.
-   * @param minFactor     - range min - a minFactor of 2 ensures that
-   *                        valueTocheck is at least twice the baseValue.
-   * @param maxFactor     - range max
-   * @return long
-   */
-  private static long sanitizeUserArgs(long valueTocheck, long baseValue,
-      long minFactor, long maxFactor)
-      throws IllegalArgumentException {
-    if ((valueTocheck >= (baseValue * minFactor)) &&
-        (valueTocheck <= (baseValue * maxFactor))) {
-      return valueTocheck;
-    }
-    String errMsg = String.format("%d is not within min = %d or max = " +
-        "%d", valueTocheck, baseValue * minFactor, baseValue * maxFactor);
-    throw new IllegalArgumentException(errMsg);
-  }
-
-  /**
-   * Returns the interval in which the heartbeat processor thread runs.
-   *
-   * @param conf - Configuration
-   * @return long in Milliseconds.
-   */
-  public static long getScmheartbeatCheckerInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Heartbeat Interval - Defines the heartbeat frequency from a datanode to
-   * SCM.
-   *
-   * @param conf - Ozone Config
-   * @return - HB interval in seconds.
-   */
-  public static long getScmHeartbeatInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
-        TimeUnit.SECONDS);
-  }
-
-  /**
-   * Get the Stale Node interval, which is used by SCM to flag a datanode as
-   * stale, if the heartbeat from that node has been missing for this duration.
-   *
-   * @param conf - Configuration.
-   * @return - Long, Milliseconds to wait before flagging a node as stale.
-   */
-  public static long getStaleNodeInterval(Configuration conf) {
-
-    long staleNodeIntervalMs =
-        conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-        OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-
-    long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
-
-    long heartbeatIntervalMs = getScmHeartbeatInterval(conf) * 1000;
-
-
-    // Make sure that StaleNodeInterval is configured way above the frequency
-    // at which we run the heartbeat thread.
-    //
-    // Here we check that staleNodeInterval is at least five times the
-    // interval at which the heartbeat processor thread runs.
-    try {
-      sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
-          5, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Stale Node Interval is cannot be honored due to " +
-              "mis-configured {}. ex:  {}",
-          OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
-      throw ex;
-    }
-
-    // Make sure that the stale node interval is greater than the configured
-    // interval at which datanodes send heartbeats.
-    try {
-      sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Stale Node Interval MS is cannot be honored due to " +
-              "mis-configured {}. ex:  {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
-      throw ex;
-    }
-    return staleNodeIntervalMs;
-  }
-
-  /**
-   * Gets the interval for dead node flagging. This has to be a value that is
-   * greater than the stale node value, and by transitive relation we also
-   * know that this value is greater than the heartbeat interval and the
-   * heartbeat processing interval.
-   *
-   * @param conf - Configuration.
-   * @return - the interval for dead node flagging.
-   */
-  public static long getDeadNodeInterval(Configuration conf) {
-    long staleNodeIntervalMs = getStaleNodeInterval(conf);
-    long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
-        OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    try {
-      // Make sure that the dead node interval is at least twice the stale
-      // node interval, with a max of 1000 times the stale node interval.
-      sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Dead Node Interval MS is cannot be honored due to " +
-              "mis-configured {}. ex:  {}", OZONE_SCM_STALENODE_INTERVAL, ex);
-      throw ex;
-    }
-    return deadNodeIntervalMs;
-  }
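To see how the two sanitizeUserArgs checks interact, here is a worked example with illustrative (not default) values: with a heartbeat checker interval of 3000 ms and a heartbeat interval of 30000 ms, the stale node interval must fall within [15000, 3000000] ms (5x to 1000x the checker interval) and within [90000, 30000000] ms (3x to 1000x the heartbeat interval), so a setting of 90000 ms satisfies both checks; the dead node interval must then fall within [180000, 90000000] ms (2x to 1000x the stale node interval).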
-
-  /**
-   * Returns the maximum number of heartbeats to process per loop of the
-   * processing thread.
-   * @param conf Configuration
-   * @return - int -- Number of HBs to process
-   */
-  public static int getMaxHBToProcessPerLoop(Configuration conf) {
-    return conf.getInt(ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS,
-        ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT);
-  }
-
-  /**
-   * Timeout value for the RPC from Datanode to SCM, primarily used for
-   * Heartbeats and container reports.
-   *
-   * @param conf - Ozone Config
-   * @return - Rpc timeout in Milliseconds.
-   */
-  public static long getScmRpcTimeOutInMilliseconds(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT,
-        OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Log Warn interval.
-   *
-   * @param conf - Ozone Config
-   * @return - Log warn interval.
-   */
-  public static int getLogWarnInterval(Configuration conf) {
-    return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT,
-        OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT);
-  }
-
-  /**
-   * Returns the container port.
-   * @param conf - Conf
-   * @return port number.
-   */
-  public static int getContainerPort(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-  }
-
-  /**
-   * After starting an RPC server, updates configuration with the actual
-   * listening address of that server. The listening address may be different
-   * from the configured address if, for example, the configured address uses
-   * port 0 to request use of an ephemeral port.
-   *
-   * @param conf configuration to update
-   * @param rpcAddressKey configuration key for RPC server address
-   * @param addr configured address
-   * @param rpcServer started RPC server.
-   */
-  public static InetSocketAddress updateRPCListenAddress(
-      OzoneConfiguration conf, String rpcAddressKey,
-      InetSocketAddress addr, RPC.Server rpcServer) {
-    return updateListenAddress(conf, rpcAddressKey, addr,
-        rpcServer.getListenerAddress());
-  }
-
-  /**
-   * After starting a server, updates configuration with the actual
-   * listening address of that server. The listening address may be different
-   * from the configured address if, for example, the configured address uses
-   * port 0 to request use of an ephemeral port.
-   *
-   * @param conf       configuration to update
-   * @param addressKey configuration key for RPC server address
-   * @param addr       configured address
-   * @param listenAddr the real listening address.
-   */
-  public static InetSocketAddress updateListenAddress(OzoneConfiguration conf,
-      String addressKey, InetSocketAddress addr, InetSocketAddress listenAddr) {
-    InetSocketAddress updatedAddr = new InetSocketAddress(addr.getHostString(),
-        listenAddr.getPort());
-    conf.set(addressKey,
-        addr.getHostString() + ":" + listenAddr.getPort());
-    return updatedAddr;
-  }
-
-  /**
-   * Releases an HTTP connection if the request is not null.
-   * @param request
-   */
-  public static void releaseConnection(HttpRequestBase request) {
-    if (request != null) {
-      request.releaseConnection();
-    }
-  }
-
-  /**
-   * @return a default instance of {@link CloseableHttpClient}.
-   */
-  public static CloseableHttpClient newHttpClient() {
-    return OzoneClientUtils.newHttpClient(new OzoneConfiguration());
-  }
-
-  /**
-   * Returns a {@link CloseableHttpClient} configured by given configuration.
-   * If conf is null, returns a default instance.
-   *
-   * @param conf configuration
-   * @return a {@link CloseableHttpClient} instance.
-   */
-  public static CloseableHttpClient newHttpClient(Configuration conf) {
-    long socketTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
-    long connectionTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
-    if (conf != null) {
-      socketTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      connectionTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-    }
-
-    CloseableHttpClient client = HttpClients.custom()
-        .setDefaultRequestConfig(
-            RequestConfig.custom()
-                .setSocketTimeout(Math.toIntExact(socketTimeout))
-                .setConnectTimeout(Math.toIntExact(connectionTimeout))
-                .build())
-        .build();
-    return client;
-  }
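A minimal usage sketch combining the two HTTP helpers above, written as if inside a method that may throw IOException; the URL is illustrative only:

    // Illustrative only: pairs newHttpClient(conf) with releaseConnection(req).
    OzoneConfiguration conf = new OzoneConfiguration();
    CloseableHttpClient client = newHttpClient(conf);
    HttpGet req = new HttpGet("http://localhost:9880/");
    try {
      client.execute(req);
    } finally {
      releaseConnection(req);
      client.close();
    }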
-
-  /**
-   * Verifies that a bucket or volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume Name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-
-    if (resName == null) {
-      throw new IllegalArgumentException("Bucket or Volume name is null");
-    }
-
-    if ((resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH) ||
-        (resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH)) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume length is illegal, " +
-              "valid length is 3-63 characters");
-    }
-
-    if ((resName.charAt(0) == '.') || (resName.charAt(0) == '-')) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot start with a period or dash");
-    }
-
-    if ((resName.charAt(resName.length() - 1) == '.') ||
-        (resName.charAt(resName.length() - 1) == '-')) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot end with a period or dash");
-    }
-
-    boolean isIPv4 = true;
-    char prev = (char) 0;
-
-    for (int index = 0; index < resName.length(); index++) {
-      char currChar = resName.charAt(index);
-
-      if (currChar != '.') {
-        isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
-      }
-
-      if (currChar > 'A' && currChar < 'Z') {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name does not support uppercase characters");
-      }
-
-      if ((currChar != '.') && (currChar != '-')) {
-        if ((currChar < '0') || (currChar > '9' && currChar < 'a') ||
-            (currChar > 'z')) {
-          throw new IllegalArgumentException("Bucket or Volume name has an " +
-              "unsupported character : " +
-              currChar);
-        }
-      }
-
-      if ((prev == '.') && (currChar == '.')) {
-        throw new IllegalArgumentException("Bucket or Volume name should not " +
-            "have two contiguous periods");
-      }
-
-      if ((prev == '-') && (currChar == '.')) {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have period after dash");
-      }
-
-      if ((prev == '.') && (currChar == '-')) {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have dash after period");
-      }
-      prev = currChar;
-    }
-
-    if (isIPv4) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot be an IPv4 address or all numeric");
-    }
-  }
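The following names illustrate the rules enforced above (3-63 characters, lowercase DNS-style, no leading, trailing, or adjacent '.' and '-', and not purely numeric); the names are made up for illustration:

    verifyResourceName("bucket-1");   // accepted
    verifyResourceName("my.volume");  // accepted
    verifyResourceName("Bucket");     // rejected: uppercase character
    verifyResourceName("-bucket");    // rejected: starts with a dash
    verifyResourceName("bu..cket");   // rejected: two contiguous periods
    verifyResourceName("10.0.0.1");   // rejected: IPv4-style / all-numeric name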
-
-  /**
-   * Converts a time in milliseconds to the human readable format required in Ozone.
-   * @return a human readable string for the input time
-   */
-  public static String formatDateTime(long millis) {
-    ZonedDateTime dateTime = ZonedDateTime.ofInstant(
-        Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone());
-    return  DATE_FORMAT.get().format(dateTime);
-  }
-
-  /**
-   * Converts a time in the Ozone date format to milliseconds.
-   * @return time in milliseconds
-   */
-  public static long formatDateTime(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return ZonedDateTime.parse(date, DATE_FORMAT.get())
-        .toInstant().getEpochSecond();
-  }
-
-  /**
-   * Returns the maximum number of outstanding async requests to be handled
-   * by the Standalone and Ratis clients.
-   */
-  public static int getMaxOutstandingRequests(Configuration config) {
-    return config
-        .getInt(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS,
-            ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT);
-  }
-}
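For reference, a minimal self-contained sketch of the host:port resolution pattern the removed helpers implement, assuming Guava's HostAndPort as above; the class name, config key handling, and defaults are illustrative, not part of this change:

    import com.google.common.net.HostAndPort;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public final class AddressResolutionSketch {
      /** Resolves key as host[:port], falling back to the supplied defaults. */
      public static InetSocketAddress resolve(Configuration conf, String key,
          String defaultHost, int defaultPort) {
        String value = conf.getTrimmed(key);
        String host = defaultHost;
        int port = defaultPort;
        if (value != null && !value.isEmpty()) {
          HostAndPort hp = HostAndPort.fromString(value);
          host = hp.getHostText();
          port = hp.getPortOrDefault(defaultPort);
        }
        return NetUtils.createSocketAddr(host + ":" + port);
      }

      private AddressResolutionSketch() {
      }
    }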

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto

@@ -75,6 +75,7 @@ message DatanodeIDProto {
   optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
   optional uint32 containerPort = 8 [default = 0]; // Ozone stand_alone protocol
   optional uint32 ratisPort = 9 [default = 0]; //Ozone ratis port
+  optional uint32 ozoneRestPort = 10 [default = 0];
 }
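A small sketch of how the new ozoneRestPort field added above travels through the generated protobuf type; accessor names follow standard protobuf codegen, and the port values are illustrative:

    // Illustrative only: build a DatanodeIDProto carrying the new REST port.
    HdfsProtos.DatanodeIDProto id = HdfsProtos.DatanodeIDProto.newBuilder()
        .setIpAddr("127.0.0.1")
        .setHostName("localhost")
        .setDatanodeUuid("datanode-uuid")
        .setXferPort(9866)
        .setInfoPort(9864)
        .setIpcPort(9867)
        .setOzoneRestPort(9880)   // new optional field; 0 when unset
        .build();
    int restPort = id.getOzoneRestPort();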
 
 /**

+ 0 - 9
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -2,9 +2,6 @@
      <Match>
        <Package name="org.apache.hadoop.record.compiler.generated" />
      </Match>
-     <Match>
-       <Package name="org.apache.hadoop.hdfs.ozone.protocol.proto" />
-     </Match>
      <Match>
        <Package name="org.apache.hadoop.hdfs.protocol.proto" />
      </Match>
@@ -17,12 +14,6 @@
      <Match>
        <Package name="org.apache.hadoop.hdfs.qjournal.protocol" />
      </Match>
-     <Match>
-       <Package name="org.apache.hadoop.ozone.protocol.proto" />
-     </Match>
-     <Match>
-       <Package name ="org.apache.hadoop.cblock.protocol.proto" />
-     </Match>
      <Match>
        <Package name="org.apache.hadoop.hdfs.federation.protocol.proto" />
      </Match>

+ 5 - 119
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -168,6 +168,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>netty</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-all</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
       <artifactId>htrace-core4</artifactId>
@@ -187,17 +192,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.rocksdb</groupId>
-      <artifactId>rocksdbjni</artifactId>
-      <version>5.8.0</version>
-    </dependency>
-    <dependency>
-      <groupId>io.swagger</groupId>
-      <artifactId>swagger-annotations</artifactId>
-      <version>1.5.9</version>
-      <scope>provided</scope>
-    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>
@@ -208,69 +202,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.jscsi</groupId>
-      <artifactId>target</artifactId>
-      <version>2.6.0</version>
-      <optional>true</optional>
-      <exclusions>
-        <exclusion>
-          <groupId>ch.qos.logback</groupId>
-          <artifactId>logback-classic</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.jctools</groupId>
-      <artifactId>jctools-core</artifactId>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.8.7</version>
-    </dependency>
-    <dependency>
-      <groupId>io.kubernetes</groupId>
-      <artifactId>client-java</artifactId>
-      <version>1.0.0-beta1</version>
-      <exclusions>
-        <exclusion>
-          <groupId>io.swagger</groupId>
-          <artifactId>swagger-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.github.stefanbirkner</groupId>
-          <artifactId>system-rules</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>
-      <dependency>
-          <groupId>io.dropwizard.metrics</groupId>
-          <artifactId>metrics-core</artifactId>
-      </dependency>
     <dependency>
         <groupId>org.assertj</groupId>
         <artifactId>assertj-core</artifactId>
         <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <version>1.19</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <version>1.19</version>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <build>
@@ -366,7 +307,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <configuration>
               <tasks>
                 <copy file="src/main/resources/hdfs-default.xml" todir="src/site/resources"/>
-                <copy file="src/main/resources/ozone-default.xml" todir="src/site/resources"/>
                 <copy file="src/main/xsl/configuration.xsl" todir="src/site/resources"/>
               </tasks>
             </configuration>
@@ -403,9 +343,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>QJournalProtocol.proto</include>
                   <include>editlog.proto</include>
                   <include>fsimage.proto</include>
-                  <include>StorageContainerDatanodeProtocol.proto</include>
-                  <include>CBlockServiceProtocol.proto</include>
-                  <include>CBlockClientServerProtocol.proto</include>
                   <include>FederationProtocol.proto</include>
                   <include>RouterProtocol.proto</include>
                   <include>AliasMapProtocol.proto</include>
@@ -472,14 +409,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/webapps/static/dataTables.bootstrap.js</exclude>
             <exclude>src/main/webapps/static/d3-v4.1.1.min.js</exclude>
             <exclude>src/test/resources/diskBalancer/data-cluster-3node-3disk.json</exclude>
-            <exclude>src/main/webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>src/main/webapps/static/nvd3-1.8.5.min.js</exclude>
-            <exclude>src/main/webapps/static/angular-route-1.6.4.min.js</exclude>
-            <exclude>src/main/webapps/static/nvd3-1.8.5.min.css</exclude>
-            <exclude>src/main/webapps/static/angular-nvd3-1.0.9.min.js</exclude>
-            <exclude>src/main/webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>src/main/webapps/static/angular-1.6.4.min.js</exclude>
-            <exclude>src/main/webapps/static/d3-3.5.17.min.js</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -492,55 +421,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <includes>
                 <include>configuration.xsl</include>
                 <include>hdfs-default.xml</include>
-                <include>ozone-default.xml</include>
               </includes>
               <followSymlinks>false</followSymlinks>
             </fileset>
           </filesets>
         </configuration>
       </plugin>
-      <plugin>
-        <groupId>com.github.kongchen</groupId>
-        <artifactId>swagger-maven-plugin</artifactId>
-        <version>3.1.5</version>
-        <executions>
-          <execution>
-            <phase>compile</phase>
-            <goals>
-              <goal>generate</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <apiSources>
-            <apiSource>
-              <springmvc>false</springmvc>
-              <swaggerDirectory>target/webapps/static</swaggerDirectory>
-              <swaggerFileName>ozone.swagger</swaggerFileName>
-              <schemes>
-                <scheme>http</scheme>
-              </schemes>
-              <host>localhost:9864</host>
-              <basePath>/</basePath>
-              <locations>
-                <location>org.apache.hadoop.ozone.web.interfaces</location>
-              </locations>
-              <info>
-                <title>HDFS Ozone REST Api</title>
-                <version>${project.version}</version>
-                <contact>
-                  <name>Apache Hadoop project</name>
-                  <url>https://hadoop.apache.org</url>
-                </contact>
-                <license>
-                  <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
-                  <name>Apache 2.0</name>
-                </license>
-              </info>
-            </apiSource>
-          </apiSources>
-        </configuration>
-      </plugin>
     </plugins>
   </build>
 

+ 0 - 47
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -32,11 +32,8 @@ function hadoop_usage
   hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
   hadoop_add_option "--workers" "turn on worker mode"
 
-
   hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
   hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
-  hadoop_add_subcommand "cblock" admin "cblock CLI"
-  hadoop_add_subcommand "cblockserver" daemon "run cblock server"
   hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
   hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
   hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
@@ -49,15 +46,12 @@ function hadoop_usage
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
   hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
-  hadoop_add_subcommand "freon" client "runs an ozone data generator"
   hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" client "get config values from configuration"
   hadoop_add_subcommand "groups" client "get the groups which users belong to"
   hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
   hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
-  hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
-  hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
   hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user"
   hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types"
   hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
@@ -65,17 +59,12 @@ function hadoop_usage
   hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file"
   hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
   hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
-  hadoop_add_subcommand "oz" client "command line interface for ozone"
-  hadoop_add_subcommand "oz_debug" client "ozone debug tool, convert ozone metadata into relational data"
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
-  hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
-  hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
-
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
 
@@ -97,13 +86,6 @@ function hdfscmd_case
     cacheadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
     ;;
-    cblock)
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
-    ;;
-    cblockserver)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.CBlockManager
-    ;;
     classpath)
       hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
     ;;
@@ -157,9 +139,6 @@ function hdfscmd_case
     fetchdt)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
     ;;
-    freon)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
-    ;;
     fsck)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
     ;;
@@ -176,17 +155,9 @@ function hdfscmd_case
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
     ;;
-    jscsi)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
-    ;;
     jmxget)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
     ;;
-    ksm)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
-    ;;
     lsSnapshottableDir)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
     ;;
@@ -215,28 +186,10 @@ function hdfscmd_case
     oiv_legacy)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
     ;;
-    oz)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
-    ;;
-    oz_debug)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SQLCLI
-    ;;
     portmap)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
     ;;
-    scmcli)
-       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
-    ;;
-    scm)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.ozone.scm.StorageContainerManager'
-      hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}"
-    ;;
-    scmcli)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
-    ;;
     secondarynamenode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh

@@ -17,10 +17,6 @@
 
 MYNAME="${BASH_SOURCE-$0}"
 
-## @description  usage info
-## @audience     private
-## @stability    evolving
-## @replaceable  no
 function hadoop_usage
 {
   hadoop_add_option "--buildpaths" "attempt to add class files from build tree"

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh

@@ -17,10 +17,6 @@
 
 MYNAME="${BASH_SOURCE-$0}"
 
-## @description  usage info
-## @audience     private
-## @stability    evolving
-## @replaceable  no
 function hadoop_usage
 {
   hadoop_add_option "--buildpaths" "attempt to add class files from build tree"

+ 0 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -36,8 +36,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -73,7 +71,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -1497,23 +1494,6 @@ public class DFSUtil {
     return password;
   }
 
-  public static boolean isOzoneEnabled(Configuration conf) {
-    String securityEnabled = conf.get(CommonConfigurationKeysPublic
-            .HADOOP_SECURITY_AUTHENTICATION,
-        "simple");
-    boolean securityAuthorizationEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
-        false);
-
-    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
-      LOG.error("Ozone is not supported in a security enabled cluster. ");
-      return false;
-    } else {
-      return conf.getBoolean(OZONE_ENABLED,
-          OZONE_ENABLED_DEFAULT);
-    }
-  }
-
   /**
    * Converts a Date into an ISO-8601 formatted datetime string.
    */

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -189,10 +189,8 @@ public interface HdfsServerConstants {
         return NamenodeRole.NAMENODE;
       }
     }
-
+    
     public void setClusterId(String cid) {
-      Preconditions.checkState(this == UPGRADE || this == UPGRADEONLY
-          || this == FORMAT);
       clusterId = cid;
     }
 
@@ -217,7 +215,6 @@ public interface HdfsServerConstants {
     }
 
     public void setForce(int force) {
-      Preconditions.checkState(this == RECOVER);
       this.force = force;
     }
     
@@ -230,7 +227,6 @@ public interface HdfsServerConstants {
     }
     
     public void setForceFormat(boolean force) {
-      Preconditions.checkState(this == FORMAT);
       isForceFormat = force;
     }
     
@@ -239,7 +235,6 @@ public interface HdfsServerConstants {
     }
     
     public void setInteractiveFormat(boolean interactive) {
-      Preconditions.checkState(this == FORMAT);
       isInteractiveFormat = interactive;
     }
     

+ 42 - 90
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -48,6 +48,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_P
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService;
 
@@ -109,10 +110,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
 import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -191,7 +190,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SecurityUtil;
@@ -373,7 +371,6 @@ public class DataNode extends ReconfigurableBase
   private final String confVersion;
   private final long maxNumberOfBlocksToLog;
   private final boolean pipelineSupportECN;
-  private final boolean ozoneEnabled;
 
   private final List<String> usersWithLocalPathAccess;
   private final boolean connectToDnViaHostname;
@@ -402,8 +399,6 @@ public class DataNode extends ReconfigurableBase
 
   private final SocketFactory socketFactory;
 
-  private DatanodeStateMachine datanodeStateMachine;
-
   private static Tracer createTracer(Configuration conf) {
     return new Tracer.Builder("DataNode").
         conf(TraceUtils.wrapHadoopConf(DATANODE_HTRACE_PREFIX , conf)).
@@ -414,8 +409,6 @@ public class DataNode extends ReconfigurableBase
 
   private ScheduledThreadPoolExecutor metricsLoggerTimer;
 
-  private ObjectStoreHandler objectStoreHandler = null;
-
   /**
    * Creates a dummy DataNode for testing purpose.
    */
@@ -434,7 +427,6 @@ public class DataNode extends ReconfigurableBase
     this.connectToDnViaHostname = false;
     this.blockScanner = new BlockScanner(this, this.getConf());
     this.pipelineSupportECN = false;
-    this.ozoneEnabled = false;
     this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
     this.dnConf = new DNConf(this);
     initOOBTimeout();
@@ -474,8 +466,6 @@ public class DataNode extends ReconfigurableBase
         DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
         DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);
 
-    this.ozoneEnabled = DFSUtil.isOzoneEnabled(conf);
-
     confVersion = "core-" +
         conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
         ",hdfs-" +
@@ -531,7 +521,7 @@ public class DataNode extends ReconfigurableBase
 
   @Override  // ReconfigurableBase
   protected Configuration getNewConf() {
-    return new OzoneConfiguration();
+    return new HdfsConfiguration();
   }
 
   /**
@@ -961,8 +951,8 @@ public class DataNode extends ReconfigurableBase
     // the DN is started by JSVC, pass it along.
     ServerSocketChannel httpServerChannel = secureResources != null ?
         secureResources.getHttpServerChannel() : null;
-    this.httpServer = new DatanodeHttpServer(getConf(), this, httpServerChannel,
-        this.objectStoreHandler);
+
+    httpServer = new DatanodeHttpServer(getConf(), this, httpServerChannel);
     httpServer.start();
     if (httpServer.getHttpAddress() != null) {
       infoPort = httpServer.getHttpAddress().getPort();
@@ -1411,9 +1401,7 @@ public class DataNode extends ReconfigurableBase
     
     // global DN settings
     registerMXBean();
-
     initDataXceiver();
-    initObjectStoreHandler();
     startInfoServer();
     pauseMonitor = new JvmPauseMonitor();
     pauseMonitor.init(getConf());
@@ -1453,19 +1441,6 @@ public class DataNode extends ReconfigurableBase
     }
   }
 
-  /**
-   * Initializes the object store handler.  This must be called before
-   * initialization of the HTTP server.
-   *
-   * @throws IOException if there is an I/O error
-   */
-  private void initObjectStoreHandler() throws IOException {
-    if (this.ozoneEnabled) {
-      this.objectStoreHandler = new ObjectStoreHandler(getConf());
-      LOG.info("ozone is enabled.");
-    }
-  }
-
   /**
    * Checks if the DataNode has a secure configuration if security is enabled.
    * There are 2 possible configurations that are considered secure:
@@ -1578,6 +1553,11 @@ public class DataNode extends ReconfigurableBase
         streamingAddr.getAddress().getHostAddress(), hostName, 
         storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
             infoSecurePort, getIpcPort());
+    for (ServicePlugin plugin : plugins) {
+      if (plugin instanceof DataNodeServicePlugin) {
+        ((DataNodeServicePlugin) plugin).onDatanodeIdCreation(dnId);
+      }
+    }
     return new DatanodeRegistration(dnId, storageInfo, 
         new ExportedBlockKeys(), VersionInfo.getVersion());
   }
@@ -1598,31 +1578,15 @@ public class DataNode extends ReconfigurableBase
           + ". Expecting " + storage.getDatanodeUuid());
     }
 
-    if (isOzoneEnabled()) {
-      if (datanodeStateMachine == null) {
-        datanodeStateMachine = new DatanodeStateMachine(
-            getDatanodeId(),
-            getConf());
-        datanodeStateMachine.startDaemon();
+    for (ServicePlugin plugin : plugins) {
+      if (plugin instanceof DataNodeServicePlugin) {
+        ((DataNodeServicePlugin) plugin)
+            .onDatanodeSuccessfulNamenodeRegisration(bpRegistration);
       }
     }
     registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
   }
-
-  @VisibleForTesting
-  public OzoneContainer getOzoneContainerManager() {
-    return this.datanodeStateMachine.getContainer();
-  }
-
-  @VisibleForTesting
-  public DatanodeStateMachine.DatanodeStates getOzoneStateMachineState() {
-    if (this.datanodeStateMachine != null) {
-      return this.datanodeStateMachine.getContext().getState();
-    }
-    // if the state machine doesn't exist then DN initialization is in progress
-    return DatanodeStateMachine.DatanodeStates.INIT;
-  }
-
+  
   /**
    * After the block pool has contacted the NN, registers that block pool
    * with the secret manager, updating it with the secrets provided by the NN.
@@ -1729,11 +1693,11 @@ public class DataNode extends ReconfigurableBase
   BPOfferService getBPOfferService(String bpid){
     return blockPoolManager.get(bpid);
   }
-
+  
   int getBpOsCount() {
     return blockPoolManager.getAllNamenodeThreads().size();
   }
-
+  
   /**
    * Initializes the {@link #data}. The initialization is done only once, when
    * handshake with the the first namenode is completed.
@@ -2021,7 +1985,7 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-
+    
     List<BPOfferService> bposArray = (this.blockPoolManager == null)
         ? new ArrayList<BPOfferService>()
         : this.blockPoolManager.getAllNamenodeThreads();
@@ -2054,6 +2018,7 @@ public class DataNode extends ReconfigurableBase
 
     // Terminate directory scanner and block scanner
     shutdownPeriodicScanners();
+    shutdownDiskBalancer();
 
     // Stop the web server
     if (httpServer != null) {
@@ -2064,17 +2029,6 @@ public class DataNode extends ReconfigurableBase
       }
     }
 
-    // Stop the object store handler
-    if (isOzoneEnabled()) {
-      if (this.objectStoreHandler != null) {
-        this.objectStoreHandler.close();
-        if(datanodeStateMachine != null &&
-            !datanodeStateMachine.isDaemonStopped()) {
-          datanodeStateMachine.stopDaemon();
-        }
-      }
-    }
-
     volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
 
     if (storageLocationChecker != null) {
@@ -2088,7 +2042,7 @@ public class DataNode extends ReconfigurableBase
     // shouldRun is set to false here to prevent certain threads from exiting
     // before the restart prep is done.
     this.shouldRun = false;
-
+    
     // wait reconfiguration thread, if any, to exit
     shutdownReconfigurationTask();
 
@@ -2098,8 +2052,9 @@ public class DataNode extends ReconfigurableBase
       while (true) {
         // When shutting down for restart, wait 2.5 seconds before forcing
         // termination of receiver threads.
-        if (!this.shutdownForUpgrade || (this.shutdownForUpgrade && (
-            Time.monotonicNow() - timeNotified > 1000))) {
+        if (!this.shutdownForUpgrade ||
+            (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
+                > 1000))) {
           this.threadGroup.interrupt();
           break;
         }
@@ -2110,8 +2065,7 @@ public class DataNode extends ReconfigurableBase
         }
         try {
           Thread.sleep(sleepMs);
-        } catch (InterruptedException e) {
-        }
+        } catch (InterruptedException e) {}
         sleepMs = sleepMs * 3 / 2; // exponential backoff
         if (sleepMs > 200) {
           sleepMs = 200;
@@ -2137,9 +2091,9 @@ public class DataNode extends ReconfigurableBase
       metrics.setDataNodeActiveXceiversCount(0);
     }
 
-    // IPC server needs to be shutdown late in the process, otherwise
-    // shutdown command response won't get sent.
-    if (ipcServer != null) {
+   // IPC server needs to be shutdown late in the process, otherwise
+   // shutdown command response won't get sent.
+   if (ipcServer != null) {
       ipcServer.stop();
     }
 
@@ -2154,7 +2108,7 @@ public class DataNode extends ReconfigurableBase
         LOG.warn("Received exception in BlockPoolManager#shutDownAll", ie);
       }
     }
-
+    
     if (storage != null) {
       try {
         this.storage.unlockAll();
@@ -2175,11 +2129,9 @@ public class DataNode extends ReconfigurableBase
       MBeans.unregister(dataNodeInfoBeanName);
       dataNodeInfoBeanName = null;
     }
-    if (shortCircuitRegistry != null) {
-      shortCircuitRegistry.shutdown();
-    }
+    if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
     LOG.info("Shutdown complete.");
-    synchronized (this) {
+    synchronized(this) {
       // it is already false, but setting it again to avoid a findbug warning.
       this.shouldRun = false;
       // Notify the main thread.
@@ -2717,9 +2669,8 @@ public class DataNode extends ReconfigurableBase
    */
   public static DataNode instantiateDataNode(String args [], Configuration conf,
       SecureResources resources) throws IOException {
-    if (conf == null) {
-      conf = new OzoneConfiguration();
-    }
+    if (conf == null)
+      conf = new HdfsConfiguration();
     
     if (args != null) {
       // parse generic hadoop options
@@ -3252,12 +3203,6 @@ public class DataNode extends ReconfigurableBase
           } catch (InterruptedException ie) { }
         }
         shutdown();
-
-        if (isOzoneEnabled()) {
-          if(datanodeStateMachine != null) {
-            datanodeStateMachine.stopDaemon();
-          }
-        }
       }
     };
 
@@ -3552,13 +3497,10 @@ public class DataNode extends ReconfigurableBase
     return metricsLoggerTimer;
   }
 
-  public boolean isOzoneEnabled() {
-    return ozoneEnabled;
-  }
-
   public Tracer getTracer() {
     return tracer;
   }
+
   /**
    * Allows submission of a disk balancer Job.
    * @param planID  - Hash value of the plan.
@@ -3676,4 +3618,14 @@ public class DataNode extends ReconfigurableBase
     }
     return volumeInfoList;
   }
+
+  @Private
+  public SecureResources getSecureResources() {
+    return secureResources;
+  }
+
+  @Private
+  public Collection<ServicePlugin> getPlugins() {
+    return Collections.unmodifiableList(plugins);
+  }
 }

+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeServicePlugin.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.util.ServicePlugin;
+
+/**
+ * Datanode specific service plugin with additional hooks.
+ */
+public interface DataNodeServicePlugin extends ServicePlugin{
+
+  /**
+   * Extension point to modify the datanode id.
+   *
+   * @param dataNodeId
+   */
+  default void onDatanodeIdCreation(DatanodeID dataNodeId) {
+    //NOOP
+  }
+
+  /**
+   * Extension point invoked after a successful namenode registration.
+   *
+   * @param dataNodeId the registration returned by the namenode
+   */
+  default void onDatanodeSuccessfulNamenodeRegisration(
+      DatanodeRegistration dataNodeId) {
+    //NOOP
+  }
+}
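A hypothetical plugin sketch showing how the new hooks can be implemented; the class name and comment bodies are illustrative, and such plugins are typically activated through the dfs.datanode.plugins configuration key:

    package org.apache.hadoop.hdfs.server.datanode;

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    /** Illustrative plugin; not part of this change. */
    public class LoggingDataNodePlugin implements DataNodeServicePlugin {

      @Override
      public void start(Object service) {
        // The DataNode instance is passed in here; start plugin services.
      }

      @Override
      public void stop() {
        // Stop plugin-owned services.
      }

      @Override
      public void close() {
        stop();
      }

      @Override
      public void onDatanodeIdCreation(DatanodeID dataNodeId) {
        // Decorate the id before registration, e.g. advertise an extra port.
      }

      @Override
      public void onDatanodeSuccessfulNamenodeRegisration(
          DatanodeRegistration dataNodeId) {
        // React to a completed namenode registration.
      }
    }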

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -1745,7 +1745,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       }
       assert newReplicaInfo.getState() == ReplicaState.FINALIZED
           : "Replica should be finalized";
-      
+
       volumeMap.add(bpid, newReplicaInfo);
       return newReplicaInfo;
     }

+ 3 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java

@@ -51,11 +51,9 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.web.webhdfs.DataNodeUGIProvider;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
 import org.apache.hadoop.security.ssl.SSLFactory;
@@ -93,19 +91,11 @@ public class DatanodeHttpServer implements Closeable {
 
   public DatanodeHttpServer(final Configuration conf,
       final DataNode datanode,
-      final ServerSocketChannel externalHttpChannel,
-      final ObjectStoreHandler objectStoreHandler)
+      final ServerSocketChannel externalHttpChannel)
     throws IOException {
     this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
     this.conf = conf;
 
-    final ObjectStoreJerseyContainer finalContainer;
-    if (objectStoreHandler != null) {
-      finalContainer = objectStoreHandler.getObjectStoreJerseyContainer();
-    } else {
-      finalContainer = null;
-    }
-
     Configuration confForInfoServer = new Configuration(conf);
     confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
     int proxyPort =
@@ -160,8 +150,7 @@ public class DatanodeHttpServer implements Closeable {
           }
           p.addLast(
               new ChunkedWriteHandler(),
-              new URLDispatcher(jettyAddr, conf, confForCreate,
-                  finalContainer));
+              new URLDispatcher(jettyAddr, conf, confForCreate));
         }
       });
 
@@ -218,8 +207,7 @@ public class DatanodeHttpServer implements Closeable {
             }
             p.addLast(
                 new ChunkedWriteHandler(),
-                new URLDispatcher(jettyAddr, conf, confForCreate,
-                  finalContainer));
+                new URLDispatcher(jettyAddr, conf, confForCreate));
           }
         });
     } else {
@@ -386,4 +374,3 @@ public class DatanodeHttpServer implements Closeable {
     }
   }
 }
-

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java

@@ -43,7 +43,7 @@ import org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction;
  * handler drops the request and immediately sends an HTTP 400 response.
  */
 @InterfaceAudience.Private
-final class RestCsrfPreventionFilterHandler
+public final class RestCsrfPreventionFilterHandler
     extends SimpleChannelInboundHandler<HttpRequest> {
 
   private static final Log LOG = DatanodeHttpServer.LOG;

+ 6 - 70
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/URLDispatcher.java

@@ -17,106 +17,42 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web;
 
-import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
-
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelPipeline;
 import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.handler.codec.http.HttpRequest;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.ozone.web.netty.RequestDispatchObjectStoreChannelHandler;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.net.InetSocketAddress;
 
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
+
 class URLDispatcher extends SimpleChannelInboundHandler<HttpRequest> {
-  protected static final Logger LOG =
-      LoggerFactory.getLogger(URLDispatcher.class);
   private final InetSocketAddress proxyHost;
   private final Configuration conf;
   private final Configuration confForCreate;
-  private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
 
   URLDispatcher(InetSocketAddress proxyHost, Configuration conf,
-                Configuration confForCreate,
-                ObjectStoreJerseyContainer objectStoreJerseyContainer)
-      throws IOException {
+                Configuration confForCreate) {
     this.proxyHost = proxyHost;
     this.conf = conf;
     this.confForCreate = confForCreate;
-    this.objectStoreJerseyContainer = objectStoreJerseyContainer;
   }
 
   @Override
   protected void channelRead0(ChannelHandlerContext ctx, HttpRequest req)
       throws Exception {
+    String uri = req.getUri();
     ChannelPipeline p = ctx.pipeline();
-    if (isWebHdfsRequest(req)) {
+    if (uri.startsWith(WEBHDFS_PREFIX)) {
       WebHdfsHandler h = new WebHdfsHandler(conf, confForCreate);
       p.replace(this, WebHdfsHandler.class.getSimpleName(), h);
       h.channelRead0(ctx, req);
-    } else if (isObjectStoreRequest(req)) {
-      RequestDispatchObjectStoreChannelHandler h =
-          new RequestDispatchObjectStoreChannelHandler(
-              this.objectStoreJerseyContainer);
-      p.replace(this,
-          RequestDispatchObjectStoreChannelHandler.class.getSimpleName(), h);
-      h.channelRead0(ctx, req);
-    } else if (!isObjectStoreRequestHeaders(req)){
+    } else {
       SimpleHttpProxyHandler h = new SimpleHttpProxyHandler(proxyHost);
       p.replace(this, SimpleHttpProxyHandler.class.getSimpleName(), h);
       h.channelRead0(ctx, req);
     }
   }
-
-
-  /*
-   * Returns true if the request has ozone headers
-   *
-   * @param req HTTP request
-   * @return true if request has ozone header, else false
-   */
-
-  private boolean isObjectStoreRequestHeaders(HttpRequest req) {
-    for (String version : req.headers().getAll(Header.OZONE_VERSION_HEADER)) {
-      if (version != null) {
-        LOG.debug("ozone : dispatching call to Ozone, when security is not " +
-            "enabled");
-        return true;
-      }
-    }
-    return false;
-  }
-
-
-  /*
-   * Returns true if the request is to be handled by the object store.
-   *
-   * @param req HTTP request
-   * @return true if the request is to be handled by the object store
-   */
-  private boolean isObjectStoreRequest(HttpRequest req) {
-    if (this.objectStoreJerseyContainer == null) {
-      LOG.debug("ozone : ozone is disabled or when security is enabled, ozone" +
-          " is not supported");
-      return false;
-    }
-    return isObjectStoreRequestHeaders(req);
-  }
-
-  /**
-   * Returns true if the request is to be handled by WebHDFS.
-   *
-   * @param req HTTP request
-   * @return true if the request is to be handled by WebHDFS
-   */
-  private boolean isWebHdfsRequest(HttpRequest req) {
-    return req.getUri().startsWith(WEBHDFS_PREFIX);
-  }
 }

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java

@@ -34,9 +34,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
- * NamespaceInfo is returned by the name-node in reply
+ * NamespaceInfo is returned by the name-node in reply 
  * to a data-node handshake.
- *
+ * 
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -110,7 +110,7 @@ public class NamespaceInfo extends StorageInfo {
     this.capabilities = capabilities;
   }
 
-  public NamespaceInfo(int nsID, String clusterID, String bpID,
+  public NamespaceInfo(int nsID, String clusterID, String bpID, 
       long cT) {
     this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
         VersionInfo.getVersion());
@@ -122,7 +122,7 @@ public class NamespaceInfo extends StorageInfo {
         VersionInfo.getVersion());
     this.state = st;
   }
-
+  
   public long getCapabilities() {
     return capabilities;
   }
@@ -151,7 +151,7 @@ public class NamespaceInfo extends StorageInfo {
   public String getBlockPoolID() {
     return blockPoolID;
   }
-
+  
   public String getSoftwareVersion() {
     return softwareVersion;
   }
@@ -194,4 +194,4 @@ public class NamespaceInfo extends StorageInfo {
           "BPID=" + storage.getBlockPoolID() + ".");
     }
   }
-}
+}

+ 4 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java

@@ -26,7 +26,6 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Collection;
 import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -36,8 +35,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.conf.OzoneConfiguration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -81,10 +78,6 @@ public class GetConf extends Configured implements Tool {
         "gets the exclude file path that defines the datanodes " +
         "that need to decommissioned."),
     NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses"),
-    KEYSPACEMANAGER("-keyspacemanagers",
-        "gets list of ozone key space manager nodes in the cluster"),
-    STORAGECONTAINERMANAGER("-storagecontainermanagers",
-        "gets list of ozone storage container manager nodes in the cluster"),
     CONFKEY("-confKey [key]", "gets a specific key from the configuration");
 
     private static final Map<String, CommandHandler> map;
@@ -104,10 +97,6 @@ public class GetConf extends Configured implements Tool {
           new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
       map.put(StringUtils.toLowerCase(NNRPCADDRESSES.getName()),
           new NNRpcAddressesCommandHandler());
-      map.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()),
-          new KeySpaceManagersCommandHandler());
-      map.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
-          new StorageContainerManagersCommandHandler());
       map.put(StringUtils.toLowerCase(CONFKEY.getName()),
           new PrintConfKeyCommandHandler());
     }
@@ -235,36 +224,9 @@ public class GetConf extends Configured implements Tool {
    * Handler for {@link Command#SECONDARY}
    */
   static class SecondaryNameNodesCommandHandler extends CommandHandler {
-    @Override public int doWorkInternal(GetConf tool, String[] args)
-        throws IOException {
-      tool.printMap(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
-      return 0;
-    }
-  }
-
-  /**
-   * Handler for {@link Command#STORAGECONTAINERMANAGER}.
-   */
-  static class StorageContainerManagersCommandHandler extends CommandHandler {
     @Override
-    public int doWorkInternal(GetConf tool, String[] args) throws IOException {
-      Collection<InetSocketAddress> addresses =
-          OzoneClientUtils.getSCMAddresses(tool.getConf());
-      for (InetSocketAddress addr : addresses) {
-        tool.printOut(addr.getHostName());
-      }
-      return 0;
-    }
-  }
-
-  /**
-   * Handler for {@link Command#KEYSPACEMANAGER}.
-   */
-  static class KeySpaceManagersCommandHandler extends CommandHandler {
-    @Override
-    public int doWorkInternal(GetConf tool, String[] args) throws IOException {
-      tool.printOut(OzoneClientUtils.getKsmAddress(tool.getConf())
-          .getHostName());
+    public int doWorkInternal(GetConf tool, String []args) throws IOException {
+      tool.printMap(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
       return 0;
     }
   }
@@ -395,11 +357,8 @@ public class GetConf extends Configured implements Tool {
     if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
       System.exit(0);
     }
-
-    Configuration conf = new Configuration();
-    conf.addResource(new HdfsConfiguration());
-    conf.addResource(new OzoneConfiguration());
-    int res = ToolRunner.run(new GetConf(conf), args);
+    
+    int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
     System.exit(res);
   }
 }

+ 3 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dn.js

@@ -18,11 +18,11 @@
 (function () {
   "use strict";
 
-  var data = {ozone: {enabled: false}};
+  var data = {};
 
   dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));
 
-  function loadDatanodeInfo() {
+  function load() {
     $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) {
       data.dn = workaround(resp.beans[0]);
       data.dn.HostName = resp.beans[0]['DatanodeHostname'];
@@ -30,26 +30,6 @@
     }).fail(show_err_msg);
   }
 
-  function loadOzoneScmInfo() {
-        $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager', function (resp) {
-            if (resp.beans.length > 0) {
-                data.ozone.SCMServers = resp.beans[0].SCMServers;
-                data.ozone.enabled = true;
-                render();
-            }
-        }).fail(show_err_msg);
-  }
-
-  function loadOzoneStorageInfo() {
-        $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager', function (resp) {
-            if (resp.beans.length > 0) {
-                data.ozone.LocationReport = resp.beans[0].LocationReport;
-                data.ozone.enabled = true;
-                render();
-            }
-        }).fail(show_err_msg);
-    }
-
   function workaround(dn) {
     function node_map_to_array(nodes) {
       var res = [];
@@ -85,8 +65,6 @@
     $('#alert-panel').show();
   }
 
-    loadDatanodeInfo();
-    loadOzoneScmInfo();
-    loadOzoneStorageInfo();
+  load();
 
 })();

Some files were not shown because too many files changed in this diff