
HADOOP-15791. Remove Ozone related sources from the 3.2 branch. Contributed by Elek, Marton.

Sunil G, 6 years ago
parent commit 2c392da8aa
100 files changed with 0 additions and 11565 deletions
1. + 0 - 10  .gitignore
2. + 0 - 3  dev-support/bin/dist-layout-stitching
3. + 0 - 56  hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
4. + 0 - 2  hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
5. + 0 - 5  hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
6. + 0 - 9  hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
7. + 0 - 44  hadoop-hdds/client/pom.xml
8. + 0 - 215  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
9. + 0 - 224  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
10. + 0 - 92  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
11. + 0 - 215  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
12. + 0 - 476  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
13. + 0 - 255  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
14. + 0 - 23  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
15. + 0 - 23  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
16. + 0 - 279  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
17. + 0 - 236  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
18. + 0 - 23  hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
19. + 0 - 28  hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
20. + 0 - 250  hadoop-hdds/common/pom.xml
21. + 0 - 157  hadoop-hdds/common/src/main/conf/log4j.properties
22. + 0 - 97  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
23. + 0 - 53  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
24. + 0 - 351  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
25. + 0 - 100  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
26. + 0 - 25  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
27. + 0 - 35  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
28. + 0 - 35  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
29. + 0 - 22  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
30. + 0 - 90  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
31. + 0 - 203  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
32. + 0 - 63  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
33. + 0 - 28  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
34. + 0 - 23  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
35. + 0 - 185  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
36. + 0 - 162  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
37. + 0 - 18  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
38. + 0 - 23  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
39. + 0 - 401  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
40. + 0 - 22  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
41. + 0 - 287  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
42. + 0 - 81  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
43. + 0 - 135  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
44. + 0 - 174  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
45. + 0 - 24  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
46. + 0 - 107  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
47. + 0 - 79  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
48. + 0 - 36  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
49. + 0 - 482  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
50. + 0 - 132  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
51. + 0 - 53  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
52. + 0 - 315  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
53. + 0 - 97  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
54. + 0 - 104  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
55. + 0 - 22  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
56. + 0 - 18  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
57. + 0 - 24  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
58. + 0 - 127  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
59. + 0 - 60  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
60. + 0 - 100  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
61. + 0 - 152  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
62. + 0 - 19  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
63. + 0 - 173  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
64. + 0 - 35  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
65. + 0 - 371  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
66. + 0 - 36  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
67. + 0 - 24  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
68. + 0 - 432  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
69. + 0 - 23  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
70. + 0 - 233  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
71. + 0 - 308  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
72. + 0 - 216  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
73. + 0 - 30  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
74. + 0 - 36  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
75. + 0 - 76  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
76. + 0 - 37  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
77. + 0 - 38  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
78. + 0 - 131  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
79. + 0 - 32  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
80. + 0 - 33  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
81. + 0 - 138  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
82. + 0 - 97  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
83. + 0 - 97  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
84. + 0 - 51  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
85. + 0 - 249  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
86. + 0 - 183  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
87. + 0 - 18  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
88. + 0 - 42  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
89. + 0 - 68  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
90. + 0 - 21  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
91. + 0 - 255  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
92. + 0 - 184  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
93. + 0 - 23  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
94. + 0 - 189  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
95. + 0 - 46  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java
96. + 0 - 65  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
97. + 0 - 45  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
98. + 0 - 45  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java
99. + 0 - 251  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
100. + 0 - 45  hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java

+ 0 - 10
.gitignore

@@ -51,13 +51,3 @@ patchprocess/
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
 
-# Ignore files generated by HDDS acceptance tests.
-hadoop-ozone/acceptance-test/docker-compose.log
-hadoop-ozone/acceptance-test/junit-results.xml
-
-#robotframework outputs
-log.html
-output.xml
-report.html
-
-hadoop-ozone/docs/public

+ 0 - 3
dev-support/bin/dist-layout-stitching

@@ -21,9 +21,6 @@ VERSION=$1
 # project.build.directory
 BASEDIR=$2
 
-#hdds.version
-HDDS_VERSION=$3
-
 function run()
 {
   declare res

+ 0 - 56
hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml

@@ -1,56 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
-  <id>hadoop-src</id>
-  <formats>
-    <format>tar.gz</format>
-  </formats>
-  <includeBaseDirectory>true</includeBaseDirectory>
-  <fileSets>
-    <fileSet>
-      <directory>.</directory>
-      <includes>
-        <include>LICENCE.txt</include>
-        <include>README.txt</include>
-        <include>NOTICE.txt</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>.</directory>
-      <useDefaultExcludes>true</useDefaultExcludes>
-      <excludes>
-        <exclude>.git/**</exclude>
-        <exclude>**/.gitignore</exclude>
-        <exclude>**/.svn</exclude>
-        <exclude>**/*.iws</exclude>
-        <exclude>**/*.ipr</exclude>
-        <exclude>**/*.iml</exclude>
-        <exclude>**/.classpath</exclude>
-        <exclude>**/.project</exclude>
-        <exclude>**/.settings</exclude>
-        <exclude>**/target/**</exclude>
-        <!-- until the code that does this is fixed -->
-        <exclude>**/*.log</exclude>
-        <exclude>**/build/**</exclude>
-        <exclude>**/file:/**</exclude>
-        <exclude>**/SecurityAuth.audit*</exclude>
-      </excludes>
-    </fileSet>
-  </fileSets>
-</assembly>

+ 0 - 2
hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml

@@ -50,8 +50,6 @@
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
         <exclude>**/SecurityAuth.audit*</exclude>
-        <exclude>hadoop-ozone/**</exclude>
-        <exclude>hadoop-hdds/**</exclude>
       </excludes>
     </fileSet>
   </fileSets>

+ 0 - 5
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -596,11 +596,6 @@ function hadoop_bootstrap
   YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
   MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
   MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-  HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
-  HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
-  OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
-  OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
-  OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
 
   HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}

+ 0 - 9
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -403,15 +403,6 @@ esac
 #
 # export HDFS_DFSROUTER_OPTS=""
 
-###
-# Ozone Manager specific parameters
-###
-# Specify the JVM options to be used when starting the Ozone Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_OM_OPTS=""
-
 ###
 # HDFS StorageContainerManager specific parameters
 ###

+ 0 - 44
hadoop-hdds/client/pom.xml

@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>hadoop-hdds-client</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Client Library</description>
-  <name>Apache Hadoop HDDS Client</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-all</artifactId>
-    </dependency>
-
-  </dependencies>
-</project>

+ 0 - 215
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java

@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.shaded.io.grpc.ManagedChannel;
-import org.apache.ratis.shaded.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.shaded.io.grpc.stub.StreamObserver;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public class XceiverClientGrpc extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
-  private final Pipeline pipeline;
-  private final Configuration config;
-  private XceiverClientProtocolServiceStub asyncStub;
-  private XceiverClientMetrics metrics;
-  private ManagedChannel channel;
-  private final Semaphore semaphore;
-  private boolean closed = false;
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   *
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config -- Ozone Config
-   */
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
-    super();
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkNotNull(config);
-    this.pipeline = pipeline;
-    this.config = config;
-    this.semaphore =
-        new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
-    this.metrics = XceiverClientManager.getXceiverClientMetrics();
-  }
-
-  @Override
-  public void connect() throws Exception {
-    DatanodeDetails leader = this.pipeline.getLeader();
-
-    // read port from the data node, on failure use default configured
-    // port.
-    int port = leader.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-    if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    }
-    LOG.debug("Connecting to server Port : " + leader.getIpAddress());
-    channel = NettyChannelBuilder.forAddress(leader.getIpAddress(), port)
-        .usePlaintext()
-        .maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
-        .build();
-    asyncStub = XceiverClientProtocolServiceGrpc.newStub(channel);
-  }
-
-  /**
-   * Returns if the xceiver client connects to a server.
-   *
-   * @return True if the connection is alive, false otherwise.
-   */
-  @VisibleForTesting
-  public boolean isConnected() {
-    return !channel.isTerminated() && !channel.isShutdown();
-  }
-
-  @Override
-  public void close() {
-    closed = true;
-    channel.shutdownNow();
-    try {
-      channel.awaitTermination(60, TimeUnit.MINUTES);
-    } catch (Exception e) {
-      LOG.error("Unexpected exception while waiting for channel termination",
-          e);
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  /**
-   * Sends a given command to server gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  @Override
-  public CompletableFuture<ContainerCommandResponseProto>
-      sendCommandAsync(ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException {
-    if(closed){
-      throw new IOException("This channel is not connected.");
-    }
-
-    if(channel == null || !isConnected()) {
-      reconnect();
-    }
-
-    final CompletableFuture<ContainerCommandResponseProto> replyFuture =
-        new CompletableFuture<>();
-    semaphore.acquire();
-    long requestTime = Time.monotonicNowNanos();
-    metrics.incrPendingContainerOpsMetrics(request.getCmdType());
-    // create a new grpc stream for each non-async call.
-    final StreamObserver<ContainerCommandRequestProto> requestObserver =
-        asyncStub.send(new StreamObserver<ContainerCommandResponseProto>() {
-          @Override
-          public void onNext(ContainerCommandResponseProto value) {
-            replyFuture.complete(value);
-            metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-            metrics.addContainerOpsLatency(request.getCmdType(),
-                Time.monotonicNowNanos() - requestTime);
-            semaphore.release();
-          }
-          @Override
-          public void onError(Throwable t) {
-            replyFuture.completeExceptionally(t);
-            metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-            metrics.addContainerOpsLatency(request.getCmdType(),
-                Time.monotonicNowNanos() - requestTime);
-            semaphore.release();
-          }
-
-          @Override
-          public void onCompleted() {
-            if (!replyFuture.isDone()) {
-              replyFuture.completeExceptionally(
-                  new IOException("Stream completed but no reply for request "
-                      + request));
-            }
-          }
-        });
-    requestObserver.onNext(request);
-    requestObserver.onCompleted();
-    return replyFuture;
-  }
-
-  private void reconnect() throws IOException {
-    try {
-      connect();
-    } catch (Exception e) {
-      LOG.error("Error while connecting: ", e);
-      throw new IOException(e);
-    }
-
-    if (channel == null || !isConnected()) {
-      throw new IOException("This channel is not connected.");
-    }
-  }
-
-  /**
-   * Create a pipeline.
-   */
-  @Override
-  public void createPipeline() {
-    // For stand alone pipeline, there is no notion called setup pipeline.
-  }
-
-  public void destroyPipeline() {
-    // For stand alone pipeline, there is no notion called destroy pipeline.
-  }
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - Stand Alone as the type.
-   */
-  @Override
-  public HddsProtos.ReplicationType getPipelineType() {
-    return HddsProtos.ReplicationType.STAND_ALONE;
-  }
-}
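
For reference, a minimal self-contained sketch (not the Hadoop code itself) of the callback-to-future bridge pattern that the removed sendCommandAsync above relies on. The Callback interface here is a hypothetical stand-in for gRPC's StreamObserver:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

public final class CallbackFutureBridge {

  /** Hypothetical stand-in for io.grpc.stub.StreamObserver. */
  interface Callback<T> {
    void onNext(T value);
    void onError(Throwable t);
    void onCompleted();
  }

  /** Adapts a callback-driven call into a future the caller can wait on. */
  static <T> CompletableFuture<T> toFuture(
      java.util.function.Consumer<Callback<T>> call) {
    CompletableFuture<T> future = new CompletableFuture<>();
    call.accept(new Callback<T>() {
      @Override public void onNext(T value) {
        future.complete(value);          // first reply resolves the future
      }
      @Override public void onError(Throwable t) {
        future.completeExceptionally(t); // transport failure fails the future
      }
      @Override public void onCompleted() {
        if (!future.isDone()) {          // stream closed without a reply
          future.completeExceptionally(
              new IOException("Stream completed but no reply"));
        }
      }
    });
    return future;
  }

  public static void main(String[] args) throws Exception {
    // Simulate a "server" that replies once and then completes the stream.
    CompletableFuture<String> reply = toFuture(cb -> {
      cb.onNext("pong");
      cb.onCompleted();
    });
    System.out.println(reply.get()); // pong
  }
}

Completing the future from onCompleted guards against a stream that closes without ever delivering a reply, the same corner case the removed onCompleted handler covered.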

+ 0 - 224
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java

@@ -1,224 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
-
-/**
- * XceiverClientManager is responsible for the lifecycle of XceiverClient
- * instances.  Callers use this class to acquire an XceiverClient instance
- * connected to the desired container pipeline.  When done, the caller also uses
- * this class to release the previously acquired XceiverClient instance.
- *
- *
- * This class caches connection to container for reuse purpose, such that
- * accessing same container frequently will be through the same connection
- * without reestablishing connection. But the connection will be closed if
- * not being used for a period of time.
- */
-public class XceiverClientManager implements Closeable {
-
-  //TODO : change this to SCM configuration class
-  private final Configuration conf;
-  private final Cache<Long, XceiverClientSpi> clientCache;
-  private final boolean useRatis;
-
-  private static XceiverClientMetrics metrics;
-  /**
-   * Creates a new XceiverClientManager.
-   *
-   * @param conf configuration
-   */
-  public XceiverClientManager(Configuration conf) {
-    Preconditions.checkNotNull(conf);
-    int maxSize = conf.getInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
-        SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT);
-    long staleThresholdMs = conf.getTimeDuration(
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
-        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT, TimeUnit.MILLISECONDS);
-    this.useRatis = conf.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    this.conf = conf;
-    this.clientCache = CacheBuilder.newBuilder()
-        .expireAfterAccess(staleThresholdMs, TimeUnit.MILLISECONDS)
-        .maximumSize(maxSize)
-        .removalListener(
-            new RemovalListener<Long, XceiverClientSpi>() {
-            @Override
-            public void onRemoval(
-                RemovalNotification<Long, XceiverClientSpi>
-                  removalNotification) {
-              synchronized (clientCache) {
-                // Mark the entry as evicted
-                XceiverClientSpi info = removalNotification.getValue();
-                info.setEvicted();
-              }
-            }
-          }).build();
-  }
-
-  @VisibleForTesting
-  public Cache<Long, XceiverClientSpi> getClientCache() {
-    return clientCache;
-  }
-
-  /**
-   * Acquires a XceiverClientSpi connected to a container capable of
-   * storing the specified key.
-   *
-   * If there is already a cached XceiverClientSpi, simply return
-   * the cached otherwise create a new one.
-   *
-   * @param pipeline the container pipeline for the client connection
-   * @return XceiverClientSpi connected to a container
-   * @throws IOException if a XceiverClientSpi cannot be acquired
-   */
-  public XceiverClientSpi acquireClient(Pipeline pipeline, long containerID)
-      throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkArgument(pipeline.getMachines() != null);
-    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
-
-    synchronized (clientCache) {
-      XceiverClientSpi info = getClient(pipeline, containerID);
-      info.incrementReference();
-      return info;
-    }
-  }
-
-  /**
-   * Releases a XceiverClientSpi after use.
-   *
-   * @param client client to release
-   */
-  public void releaseClient(XceiverClientSpi client) {
-    Preconditions.checkNotNull(client);
-    synchronized (clientCache) {
-      client.decrementReference();
-    }
-  }
-
-  private XceiverClientSpi getClient(Pipeline pipeline, long containerID)
-      throws IOException {
-    try {
-      return clientCache.get(containerID,
-          new Callable<XceiverClientSpi>() {
-          @Override
-          public XceiverClientSpi call() throws Exception {
-            XceiverClientSpi client = null;
-            switch (pipeline.getType()) {
-            case RATIS:
-              client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf);
-              break;
-            case STAND_ALONE:
-              client = new XceiverClientGrpc(pipeline, conf);
-              break;
-            case CHAINED:
-            default:
-              throw new IOException("not implemented" + pipeline.getType());
-            }
-            client.connect();
-            return client;
-          }
-        });
-    } catch (Exception e) {
-      throw new IOException(
-          "Exception getting XceiverClient: " + e.toString(), e);
-    }
-  }
-
-  /**
-   * Close and remove all the cached clients.
-   */
-  public void close() {
-    //closing is done through RemovalListener
-    clientCache.invalidateAll();
-    clientCache.cleanUp();
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-  }
-
-  /**
-   * Tells us if Ratis is enabled for this cluster.
-   * @return True if Ratis is enabled.
-   */
-  public boolean isUseRatis() {
-    return useRatis;
-  }
-
-  /**
-   * Returns hard coded 3 as replication factor.
-   * @return 3
-   */
-  public  HddsProtos.ReplicationFactor getFactor() {
-    if(isUseRatis()) {
-      return HddsProtos.ReplicationFactor.THREE;
-    }
-    return HddsProtos.ReplicationFactor.ONE;
-  }
-
-  /**
-   * Returns the default replication type.
-   * @return Ratis or Standalone
-   */
-  public HddsProtos.ReplicationType getType() {
-    // TODO : Fix me and make Ratis default before release.
-    // TODO: Remove this as replication factor and type are pipeline properties
-    if(isUseRatis()) {
-      return HddsProtos.ReplicationType.RATIS;
-    }
-    return HddsProtos.ReplicationType.STAND_ALONE;
-  }
-
-  /**
-   * Get xceiver client metric.
-   */
-  public synchronized static XceiverClientMetrics getXceiverClientMetrics() {
-    if (metrics == null) {
-      metrics = XceiverClientMetrics.create();
-    }
-
-    return metrics;
-  }
-}
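
For reference, a minimal sketch of the connection-caching pattern the removed XceiverClientManager was built around: a Guava Cache keyed by container ID, with a size cap, access-based expiry, and a removal listener that tears down evicted connections. FakeConnection is a hypothetical stand-in for XceiverClientSpi:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import java.util.concurrent.TimeUnit;

public final class ConnectionCacheSketch {

  /** Hypothetical stand-in for a cached datanode connection. */
  static final class FakeConnection {
    final long containerId;
    FakeConnection(long containerId) { this.containerId = containerId; }
    void close() { System.out.println("closed conn for " + containerId); }
  }

  public static void main(String[] args) throws Exception {
    // Evicted or invalidated entries get closed by the listener.
    RemovalListener<Long, FakeConnection> onEvict =
        n -> n.getValue().close();

    Cache<Long, FakeConnection> cache = CacheBuilder.newBuilder()
        .maximumSize(256)                        // cap on open connections
        .expireAfterAccess(10, TimeUnit.SECONDS) // stale-connection threshold
        .removalListener(onEvict)
        .build();

    // get() creates the connection on a miss and reuses it on later hits.
    FakeConnection c1 = cache.get(42L, () -> new FakeConnection(42L));
    FakeConnection c2 = cache.get(42L, () -> new FakeConnection(42L));
    System.out.println(c1 == c2); // true: second acquire reused the cache

    cache.invalidateAll(); // close everything, as the removed close() did
  }
}

The real class additionally reference-counted clients via acquireClient/releaseClient and marked evicted entries with setEvicted, so a connection still in use was not torn down underneath its caller; that bookkeeping is omitted here.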

+ 0 - 92
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java

@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- * The client metrics for the Storage Container protocol.
- */
-@InterfaceAudience.Private
-@Metrics(about = "Storage Container Client Metrics", context = "dfs")
-public class XceiverClientMetrics {
-  public static final String SOURCE_NAME = XceiverClientMetrics.class
-      .getSimpleName();
-
-  private @Metric MutableCounterLong pendingOps;
-  private MutableCounterLong[] pendingOpsArray;
-  private MutableRate[] containerOpsLatency;
-  private MetricsRegistry registry;
-
-  public XceiverClientMetrics() {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    this.registry = new MetricsRegistry(SOURCE_NAME);
-
-    this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
-    this.containerOpsLatency = new MutableRate[numEnumEntries];
-    for (int i = 0; i < numEnumEntries; i++) {
-      pendingOpsArray[i] = registry.newCounter(
-          "numPending" + ContainerProtos.Type.forNumber(i + 1),
-          "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
-          (long) 0);
-
-      containerOpsLatency[i] = registry.newRate(
-          ContainerProtos.Type.forNumber(i + 1) + "Latency",
-          "latency of " + ContainerProtos.Type.forNumber(i + 1)
-          + " ops");
-    }
-  }
-
-  public static XceiverClientMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
-        new XceiverClientMetrics());
-  }
-
-  public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr();
-    pendingOpsArray[type.ordinal()].incr();
-  }
-
-  public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) {
-    pendingOps.incr(-1);
-    pendingOpsArray[type.ordinal()].incr(-1);
-  }
-
-  public void addContainerOpsLatency(ContainerProtos.Type type,
-      long latencyNanos) {
-    containerOpsLatency[type.ordinal()].add(latencyNanos);
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type) {
-    return pendingOpsArray[type.ordinal()].value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}
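
For reference, a minimal sketch of the per-operation-type counter pattern behind the removed metrics class, using plain AtomicLongs in place of the Hadoop metrics2 registry. OpType is a hypothetical stand-in for ContainerProtos.Type:

import java.util.EnumMap;
import java.util.concurrent.atomic.AtomicLong;

public final class PendingOpCounters {
  /** Hypothetical subset of the container op types. */
  enum OpType { CREATE_CONTAINER, READ_CHUNK, WRITE_CHUNK }

  private final AtomicLong totalPending = new AtomicLong();
  private final EnumMap<OpType, AtomicLong> pendingByType =
      new EnumMap<>(OpType.class);

  PendingOpCounters() {
    for (OpType t : OpType.values()) {
      pendingByType.put(t, new AtomicLong()); // one counter per op type
    }
  }

  void opStarted(OpType t) {   // mirrors incrPendingContainerOpsMetrics
    totalPending.incrementAndGet();
    pendingByType.get(t).incrementAndGet();
  }

  void opFinished(OpType t) {  // mirrors decrPendingContainerOpsMetrics
    totalPending.decrementAndGet();
    pendingByType.get(t).decrementAndGet();
  }

  long pending(OpType t) {
    return pendingByType.get(t).get();
  }

  public static void main(String[] args) {
    PendingOpCounters m = new PendingOpCounters();
    m.opStarted(OpType.WRITE_CHUNK);
    System.out.println(m.pending(OpType.WRITE_CHUNK)); // 1
    m.opFinished(OpType.WRITE_CHUNK);
    System.out.println(m.pending(OpType.WRITE_CHUNK)); // 0
  }
}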

+ 0 - 215
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java

@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.shaded.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.util.CheckedBiConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * An abstract implementation of {@link XceiverClientSpi} using Ratis.
- * The underlying RPC mechanism can be chosen via the constructor.
- */
-public final class XceiverClientRatis extends XceiverClientSpi {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientRatis.class);
-
-  public static XceiverClientRatis newXceiverClientRatis(
-      Pipeline pipeline, Configuration ozoneConf) {
-    final String rpcType = ozoneConf.get(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
-    final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-    return new XceiverClientRatis(pipeline,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-        retryPolicy);
-  }
-
-  private final Pipeline pipeline;
-  private final RpcType rpcType;
-  private final AtomicReference<RaftClient> client = new AtomicReference<>();
-  private final int maxOutstandingRequests;
-  private final RetryPolicy retryPolicy;
-
-  /**
-   * Constructs a client.
-   */
-  private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
-      int maxOutStandingChunks, RetryPolicy retryPolicy) {
-    super();
-    this.pipeline = pipeline;
-    this.rpcType = rpcType;
-    this.maxOutstandingRequests = maxOutStandingChunks;
-    this.retryPolicy = retryPolicy;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void createPipeline() throws IOException {
-    final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-    LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
-    callRatisRpc(pipeline.getMachines(),
-        (raftClient, peer) -> raftClient.groupAdd(group, peer.getId()));
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public void destroyPipeline() throws IOException {
-    final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
-    LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
-    callRatisRpc(pipeline.getMachines(), (raftClient, peer) -> raftClient
-        .groupRemove(group.getGroupId(), true, peer.getId()));
-  }
-
-  /**
-   * Returns Ratis as pipeline Type.
-   *
-   * @return - Ratis
-   */
-  @Override
-  public HddsProtos.ReplicationType getPipelineType() {
-    return HddsProtos.ReplicationType.RATIS;
-  }
-
-  private void callRatisRpc(List<DatanodeDetails> datanodes,
-      CheckedBiConsumer<RaftClient, RaftPeer, IOException> rpc)
-      throws IOException {
-    if (datanodes.isEmpty()) {
-      return;
-    }
-
-    final List<IOException> exceptions =
-        Collections.synchronizedList(new ArrayList<>());
-    datanodes.parallelStream().forEach(d -> {
-      final RaftPeer p = RatisHelper.toRaftPeer(d);
-      try (RaftClient client = RatisHelper
-          .newRaftClient(rpcType, p, retryPolicy)) {
-        rpc.accept(client, p);
-      } catch (IOException ioe) {
-        exceptions.add(
-            new IOException("Failed invoke Ratis rpc " + rpc + " for " + d,
-                ioe));
-      }
-    });
-    if (!exceptions.isEmpty()) {
-      throw MultipleIOException.createIOException(exceptions);
-    }
-  }
-
-  @Override
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  @Override
-  public void connect() throws Exception {
-    LOG.debug("Connecting to pipeline:{} leader:{}",
-        getPipeline().getId(),
-        RatisHelper.toRaftPeerId(pipeline.getLeader()));
-    // TODO : XceiverClient ratis should pass the config value of
-    // maxOutstandingRequests so as to set the upper bound on max no of async
-    // requests to be handled by raft client
-    if (!client.compareAndSet(null,
-        RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy))) {
-      throw new IllegalStateException("Client is already connected.");
-    }
-  }
-
-  @Override
-  public void close() {
-    final RaftClient c = client.getAndSet(null);
-    if (c != null) {
-      try {
-        c.close();
-      } catch (IOException e) {
-        throw new IllegalStateException(e);
-      }
-    }
-  }
-
-  private RaftClient getClient() {
-    return Objects.requireNonNull(client.get(), "client is null");
-  }
-
-  private CompletableFuture<RaftClientReply> sendRequestAsync(
-      ContainerCommandRequestProto request) {
-    boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
-    ByteString byteString = request.toByteString();
-    LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
-    return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
-        getClient().sendAsync(() -> byteString);
-  }
-
-  /**
-   * Sends a given command to server gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  @Override
-  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
-      ContainerCommandRequestProto request) {
-    return sendRequestAsync(request).whenComplete((reply, e) ->
-          LOG.debug("received reply {} for request: {} exception: {}", request,
-              reply, e))
-        .thenApply(reply -> {
-          try {
-            return ContainerCommandResponseProto.parseFrom(
-                reply.getMessage().getContent());
-          } catch (InvalidProtocolBufferException e) {
-            throw new CompletionException(e);
-          }
-        });
-  }
-}
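
For reference, a minimal sketch of the parallel fan-out in the removed callRatisRpc above: invoke an RPC against every datanode in parallel, collect per-peer failures into a synchronized list, and surface them as one aggregate exception. The Rpc interface is a hypothetical stand-in for the CheckedBiConsumer the real code used, and suppressed exceptions stand in for Hadoop's MultipleIOException:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public final class ParallelRpcFanOut {

  /** Hypothetical per-peer RPC, standing in for CheckedBiConsumer. */
  interface Rpc {
    void invoke(String peer) throws IOException;
  }

  static void callOnAllPeers(List<String> peers, Rpc rpc) throws IOException {
    // Synchronized list: parallelStream workers append concurrently.
    List<IOException> failures =
        Collections.synchronizedList(new ArrayList<>());
    peers.parallelStream().forEach(peer -> {
      try {
        rpc.invoke(peer);
      } catch (IOException e) {
        failures.add(new IOException("RPC failed for " + peer, e));
      }
    });
    if (!failures.isEmpty()) {
      // The real code wrapped these with MultipleIOException; here we
      // chain them as suppressed exceptions instead.
      IOException aggregate =
          new IOException(failures.size() + " peer(s) failed");
      failures.forEach(aggregate::addSuppressed);
      throw aggregate;
    }
  }

  public static void main(String[] args) {
    try {
      callOnAllPeers(List.of("dn1", "dn2", "dn3"), peer -> {
        if (peer.equals("dn2")) {
          throw new IOException("connection refused");
        }
        System.out.println("ok: " + peer);
      });
    } catch (IOException e) {
      System.out.println(e.getMessage()); // 1 peer(s) failed
    }
  }
}

Running the RPCs in parallel keeps pipeline creation and teardown from being serialized across datanodes, while the deferred aggregate exception still reports every peer that failed rather than only the first.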

+ 0 - 476
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java

@@ -1,476 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
-    .ALLOCATED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
-    .OPEN;
-
-/**
- * This class provides the client-facing APIs of container operations.
- */
-public class ContainerOperationClient implements ScmClient {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerOperationClient.class);
-  private static long containerSizeB = -1;
-  private final StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private final XceiverClientManager xceiverClientManager;
-
-  public ContainerOperationClient(
-      StorageContainerLocationProtocolClientSideTranslatorPB
-          storageContainerLocationClient,
-      XceiverClientManager xceiverClientManager) {
-    this.storageContainerLocationClient = storageContainerLocationClient;
-    this.xceiverClientManager = xceiverClientManager;
-  }
-
-  /**
-   * Return the capacity of containers. The current assumption is that all
-   * containers have the same capacity. Therefore one static is sufficient for
-   * any container.
-   * @return The capacity of one container in number of bytes.
-   */
-  public static long getContainerSizeB() {
-    return containerSizeB;
-  }
-
-  /**
-   * Set the capacity of container. Should be exactly once on system start.
-   * @param size Capacity of one container in number of bytes.
-   */
-  public static void setContainerSizeB(long size) {
-    containerSizeB = size;
-  }
-
-  /**
-   * @inheritDoc
-   */
-  @Override
-  public ContainerWithPipeline createContainer(String owner)
-      throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      ContainerWithPipeline containerWithPipeline =
-          storageContainerLocationClient.allocateContainer(
-              xceiverClientManager.getType(),
-              xceiverClientManager.getFactor(), owner);
-      Pipeline pipeline = containerWithPipeline.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline,
-          containerWithPipeline.getContainerInfo().getContainerID());
-
-      // Allocated State means that SCM has allocated this pipeline in its
-      // namespace. The client needs to create the pipeline on the machines
-      // which was choosen by the SCM.
-      Preconditions.checkState(pipeline.getLifeCycleState() == ALLOCATED ||
-          pipeline.getLifeCycleState() == OPEN, "Unexpected pipeline state");
-      if (pipeline.getLifeCycleState() == ALLOCATED) {
-        createPipeline(client, pipeline);
-      }
-      createContainer(client,
-          containerWithPipeline.getContainerInfo().getContainerID());
-      return containerWithPipeline;
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client);
-      }
-    }
-  }
-
-  /**
-   * Create a container over pipeline specified by the SCM.
-   *
-   * @param client - Client to communicate with Datanodes.
-   * @param containerId - Container ID.
-   * @throws IOException
-   */
-  public void createContainer(XceiverClientSpi client,
-      long containerId) throws IOException {
-    String traceID = UUID.randomUUID().toString();
-    storageContainerLocationClient.notifyObjectStageChange(
-        ObjectStageChangeRequestProto.Type.container,
-        containerId,
-        ObjectStageChangeRequestProto.Op.create,
-        ObjectStageChangeRequestProto.Stage.begin);
-    ContainerProtocolCalls.createContainer(client, containerId, traceID);
-    storageContainerLocationClient.notifyObjectStageChange(
-        ObjectStageChangeRequestProto.Type.container,
-        containerId,
-        ObjectStageChangeRequestProto.Op.create,
-        ObjectStageChangeRequestProto.Stage.complete);
-
-    // Let us log this info after we let SCM know that we have completed the
-    // creation state.
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Created container " + containerId
-          + " leader:" + client.getPipeline().getLeader()
-          + " machines:" + client.getPipeline().getMachines());
-    }
-  }
-
-  /**
-   * Creates a pipeline over the machines choosen by the SCM.
-   *
-   * @param client - Client
-   * @param pipeline - pipeline to be createdon Datanodes.
-   * @throws IOException
-   */
-  private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
-      throws IOException {
-
-    Preconditions.checkNotNull(pipeline.getId(), "Pipeline " +
-        "name cannot be null when client create flag is set.");
-
-    // Pipeline creation is a three step process.
-    //
-    // 1. Notify SCM that this client is doing a create pipeline on
-    // datanodes.
-    //
-    // 2. Talk to Datanodes to create the pipeline.
-    //
-    // 3. update SCM that pipeline creation was successful.
-
-    // TODO: this has not been fully implemented on server side
-    // SCMClientProtocolServer#notifyObjectStageChange
-    // TODO: when implement the pipeline state machine, change
-    // the pipeline name (string) to pipeline id (long)
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.begin);
-
-    client.createPipeline();
-
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.complete);
-
-    // TODO : Should we change the state on the client side ??
-    // That makes sense, but it is not needed for the client to work.
-    LOG.debug("Pipeline creation successful. Pipeline: {}",
-        pipeline.toString());
-  }
-
-  /**
-   * @inheritDoc
-   */
-  @Override
-  public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      // allocate container on SCM.
-      ContainerWithPipeline containerWithPipeline =
-          storageContainerLocationClient.allocateContainer(type, factor,
-              owner);
-      Pipeline pipeline = containerWithPipeline.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline,
-          containerWithPipeline.getContainerInfo().getContainerID());
-
-      // Allocated State means that SCM has allocated this pipeline in its
-      // namespace. The client needs to create the pipeline on the machines
-      // which was choosen by the SCM.
-      if (pipeline.getLifeCycleState() == ALLOCATED) {
-        createPipeline(client, pipeline);
-      }
-      // connect to pipeline leader and allocate container on leader datanode.
-      client = xceiverClientManager.acquireClient(pipeline,
-          containerWithPipeline.getContainerInfo().getContainerID());
-      createContainer(client,
-          containerWithPipeline.getContainerInfo().getContainerID());
-      return containerWithPipeline;
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client);
-      }
-    }
-  }
-
-  /**
-   * Returns a set of Nodes that meet a query criteria.
-   *
-   * @param nodeStatuses - Criteria that we want the node to have.
-   * @param queryScope - Query scope - Cluster or pool.
-   * @param poolName - if it is pool, a pool name is required.
-   * @return A set of nodes that meet the requested criteria.
-   * @throws IOException
-   */
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
-      throws IOException {
-    return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
-        poolName);
-  }
-
-  /**
-   * Creates a specified replication pipeline.
-   */
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException {
-    return storageContainerLocationClient.createReplicationPipeline(type,
-        factor, nodePool);
-  }
-
-  @Override
-  public void close() {
-    try {
-      xceiverClientManager.close();
-    } catch (Exception ex) {
-      LOG.error("Can't close " + this.getClass().getSimpleName(), ex);
-    }
-  }
-
-  /**
-   * Deletes an existing container.
-   *
-   * @param containerId - ID of the container.
-   * @param pipeline    - Pipeline that represents the container.
-   * @param force       - true to forcibly delete the container.
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerId, Pipeline pipeline,
-      boolean force) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      client = xceiverClientManager.acquireClient(pipeline, containerId);
-      String traceID = UUID.randomUUID().toString();
-      ContainerProtocolCalls
-          .deleteContainer(client, containerId, force, traceID);
-      storageContainerLocationClient
-          .deleteContainer(containerId);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleted container {}, leader: {}, machines: {} ",
-            containerId,
-            pipeline.getLeader(),
-            pipeline.getMachines());
-      }
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client);
-      }
-    }
-  }
-
-  /**
-   * Delete the container, this will release any resource it uses.
-   * @param containerID - containerID.
-   * @param force - True to forcibly delete the container.
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerID, boolean force)
-      throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerID);
-    deleteContainer(containerID, info.getPipeline(), force);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException {
-    return storageContainerLocationClient.listContainer(
-        startContainerID, count);
-  }
-
-  /**
-   * Get meta data from an existing container.
-   *
-   * @param containerID - ID of the container.
-   * @param pipeline    - Pipeline where the container is located.
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  @Override
-  public ContainerData readContainer(long containerID,
-      Pipeline pipeline) throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      client = xceiverClientManager.acquireClient(pipeline, containerID);
-      String traceID = UUID.randomUUID().toString();
-      ReadContainerResponseProto response =
-          ContainerProtocolCalls.readContainer(client, containerID, traceID);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Read container {}, leader: {}, machines: {} ",
-            containerID,
-            pipeline.getLeader(),
-            pipeline.getMachines());
-      }
-      return response.getContainerData();
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client);
-      }
-    }
-  }
-
-  /**
-   * Gets metadata from an existing container.
-   * @param containerID - ID of the container.
-   * @return ContainerData - a protobuf message carrying the basic
-   * information of a container.
-   * @throws IOException
-   */
-  @Override
-  public ContainerData readContainer(long containerID) throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerID);
-    return readContainer(containerID, info.getPipeline());
-  }
-
-  /**
-   * Given an ID, returns the ContainerInfo associated with the container.
-   * @param containerId - Container ID
-   * @return ContainerInfo of the existing container corresponding to the
-   * given ID.
-   * @throws IOException
-   */
-  @Override
-  public ContainerInfo getContainer(long containerId) throws
-      IOException {
-    return storageContainerLocationClient.getContainer(containerId);
-  }
-
-  /**
-   * Gets a container by ID -- throws if the container does not exist.
-   *
-   * @param containerId - Container ID
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  @Override
-  public ContainerWithPipeline getContainerWithPipeline(long containerId)
-      throws IOException {
-    return storageContainerLocationClient.getContainerWithPipeline(containerId);
-  }
-
-  /**
-   * Close a container.
-   *
-   * @param containerId - ID of the container to be closed.
-   * @param pipeline - Pipeline on which the container is located.
-   * @throws IOException
-   */
-  @Override
-  public void closeContainer(long containerId, Pipeline pipeline)
-      throws IOException {
-    XceiverClientSpi client = null;
-    try {
-      LOG.debug("Close container {}", pipeline);
-      /*
-      TODO: two possible orders here, revisit this later:
-      1. close on SCM first, then on data node
-      2. close on data node first, then on SCM
-
-      with 1: if the client fails after closing on SCM, there is a
-      container that SCM thinks is closed but is actually open. SCM will no
-      longer allocate blocks to it, which is fine. But SCM may later try to
-      replicate this "closed" container, which I'm not sure is safe.
-
-      with 2: if the client fails after closing on the datanode, there is a
-      container that SCM thinks is open but is actually closed. SCM will
-      still try to allocate blocks to it, which will fail when actually
-      doing the write. No more data can be written, but at least the
-      correctness and consistency of existing data are maintained.
-
-      For now, take approach #2.
-       */
-      // Actually close the container on Datanode
-      client = xceiverClientManager.acquireClient(pipeline, containerId);
-      String traceID = UUID.randomUUID().toString();
-
-      storageContainerLocationClient.notifyObjectStageChange(
-          ObjectStageChangeRequestProto.Type.container,
-          containerId,
-          ObjectStageChangeRequestProto.Op.close,
-          ObjectStageChangeRequestProto.Stage.begin);
-
-      ContainerProtocolCalls.closeContainer(client, containerId, traceID);
-      // Notify SCM to close the container
-      storageContainerLocationClient.notifyObjectStageChange(
-          ObjectStageChangeRequestProto.Type.container,
-          containerId,
-          ObjectStageChangeRequestProto.Op.close,
-          ObjectStageChangeRequestProto.Stage.complete);
-    } finally {
-      if (client != null) {
-        xceiverClientManager.releaseClient(client);
-      }
-    }
-  }
-
-  /**
-   * Close a container.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void closeContainer(long containerId)
-      throws IOException {
-    ContainerWithPipeline info = getContainerWithPipeline(containerId);
-    Pipeline pipeline = info.getPipeline();
-    closeContainer(containerId, pipeline);
-  }
-
-  /**
-   * Gets the current usage information.
-   * @param containerID - ID of the container.
-   * @return the size of the given container.
-   * @throws IOException
-   */
-  @Override
-  public long getContainerSize(long containerID) throws IOException {
-    // TODO : Fix this, it currently returns the capacity
-    // but not the current usage.
-    long size = getContainerSizeB();
-    if (size == -1) {
-      throw new IOException("Container size unknown!");
-    }
-    return size;
-  }
-}
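Note: every container RPC removed above follows the same acquire/use/release pattern around XceiverClientManager. A minimal sketch of that pattern, lifted from the deleted deleteContainer method; pipeline, containerId, force and the xceiverClientManager field are assumed to be supplied by the surrounding class:

    XceiverClientSpi client = null;
    try {
      // Borrow a cached client for the container's pipeline.
      client = xceiverClientManager.acquireClient(pipeline, containerId);
      String traceID = UUID.randomUUID().toString();
      ContainerProtocolCalls.deleteContainer(client, containerId, force, traceID);
    } finally {
      if (client != null) {
        // Always return the client so the manager can reuse or evict it.
        xceiverClientManager.releaseClient(client);
      }
    }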

+ 0 - 255
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java

@@ -1,255 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Utility methods for Ozone and Container Clients.
- *
- * The methods to retrieve SCM service endpoints assume there is a single
- * SCM service instance. This will change when we switch to replicated service
- * instances for redundancy.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class HddsClientUtils {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsClientUtils.class);
-
-  private static final int NO_PORT = -1;
-
-  private HddsClientUtils() {
-  }
-
-  /**
-   * Date format used in Ozone. The formatter is kept in a ThreadLocal,
-   * so it is thread-safe to use.
-   */
-  private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
-      ThreadLocal.withInitial(() -> {
-        DateTimeFormatter format =
-            DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
-        return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
-      });
-
-
-  /**
-   * Converts a time in milliseconds to the human-readable format used in
-   * Ozone.
-   * @param millis time in milliseconds since the epoch
-   * @return a human-readable string for the input time
-   */
-  public static String formatDateTime(long millis) {
-    ZonedDateTime dateTime = ZonedDateTime.ofInstant(
-        Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone());
-    return DATE_FORMAT.get().format(dateTime);
-  }
-
-  /**
-   * Converts a time in the Ozone date format to milliseconds.
-   * @param date date string in the Ozone date format
-   * @return time in milliseconds since the epoch
-   */
-  public static long formatDateTime(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return ZonedDateTime.parse(date, DATE_FORMAT.get())
-        .toInstant().toEpochMilli();
-  }
-
-
-
-  /**
-   * Verifies that a bucket or volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume Name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-
-    if (resName == null) {
-      throw new IllegalArgumentException("Bucket or Volume name is null");
-    }
-
-    if ((resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH) ||
-        (resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH)) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume length is illegal, " +
-              "valid length is 3-63 characters");
-    }
-
-    if ((resName.charAt(0) == '.') || (resName.charAt(0) == '-')) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot start with a period or dash");
-    }
-
-    if ((resName.charAt(resName.length() - 1) == '.') ||
-        (resName.charAt(resName.length() - 1) == '-')) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot end with a period or dash");
-    }
-
-    boolean isIPv4 = true;
-    char prev = (char) 0;
-
-    for (int index = 0; index < resName.length(); index++) {
-      char currChar = resName.charAt(index);
-
-      if (currChar != '.') {
-        isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
-      }
-
-      if (currChar >= 'A' && currChar <= 'Z') {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name does not support uppercase characters");
-      }
-
-      if ((currChar != '.') && (currChar != '-')) {
-        if ((currChar < '0') || (currChar > '9' && currChar < 'a') ||
-            (currChar > 'z')) {
-          throw new IllegalArgumentException("Bucket or Volume name has an " +
-              "unsupported character : " +
-              currChar);
-        }
-      }
-
-      if ((prev == '.') && (currChar == '.')) {
-        throw new IllegalArgumentException("Bucket or Volume name should not " +
-            "have two contiguous periods");
-      }
-
-      if ((prev == '-') && (currChar == '.')) {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have period after dash");
-      }
-
-      if ((prev == '.') && (currChar == '-')) {
-        throw new IllegalArgumentException(
-            "Bucket or Volume name should not have dash after period");
-      }
-      prev = currChar;
-    }
-
-    if (isIPv4) {
-      throw new IllegalArgumentException(
-          "Bucket or Volume name cannot be an IPv4 address or all numeric");
-    }
-  }
-
-  /**
-   * Verifies that each of the given bucket / volume names is a valid DNS
-   * name.
-   *
-   * @param resourceNames Array of bucket / volume names to be verified.
-   */
-  public static void verifyResourceName(String... resourceNames) {
-    for (String resourceName : resourceNames) {
-      HddsClientUtils.verifyResourceName(resourceName);
-    }
-  }
-
-  /**
-   * Checks that the object references passed as parameters are not null.
-   *
-   * @param references Array of object references to be checked.
-   * @param <T>
-   */
-  public static <T> void checkNotNull(T... references) {
-    for (T ref: references) {
-      Preconditions.checkNotNull(ref);
-    }
-  }
-
-  /**
-   * Returns the cache value to be used for list calls.
-   * @param conf Configuration object
-   * @return list cache size
-   */
-  public static int getListCacheSize(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
-        OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
-  }
-
-  /**
-   * @return a default instance of {@link CloseableHttpClient}.
-   */
-  public static CloseableHttpClient newHttpClient() {
-    return HddsClientUtils.newHttpClient(new Configuration());
-  }
-
-  /**
-   * Returns a {@link CloseableHttpClient} configured by given configuration.
-   * If conf is null, returns a default instance.
-   *
-   * @param conf configuration
-   * @return a {@link CloseableHttpClient} instance.
-   */
-  public static CloseableHttpClient newHttpClient(Configuration conf) {
-    long socketTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
-    long connectionTimeout = OzoneConfigKeys
-        .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
-    if (conf != null) {
-      socketTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      connectionTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-    }
-
-    CloseableHttpClient client = HttpClients.custom()
-        .setDefaultRequestConfig(
-            RequestConfig.custom()
-                .setSocketTimeout(Math.toIntExact(socketTimeout))
-                .setConnectTimeout(Math.toIntExact(connectionTimeout))
-                .build())
-        .build();
-    return client;
-  }
-
-  /**
-   * Returns the maximum number of outstanding async requests to be handled
-   * by the Standalone and Ratis clients.
-   */
-  public static int getMaxOutstandingRequests(Configuration config) {
-    return config
-        .getInt(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS,
-            ScmConfigKeys
-                .SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT);
-  }
-}
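To make the DNS-name rules enforced above concrete, a few illustrative calls; the outcomes follow directly from the checks in the deleted verifyResourceName:

    HddsClientUtils.verifyResourceName("my-bucket.images"); // accepted
    HddsClientUtils.verifyResourceName("ab");               // rejected: shorter than 3 chars
    HddsClientUtils.verifyResourceName("MyBucket");         // rejected: uppercase character
    HddsClientUtils.verifyResourceName("a..b-c");           // rejected: contiguous periods
    HddsClientUtils.verifyResourceName("10.0.0.1");         // rejected: IPv4-like / all numeric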

+ 0 - 23
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * Client facing classes for the container operations.
- */

+ 0 - 23
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * Classes for different type of container service client.
- */

+ 0 - 279
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java

@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkResponseProto;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * An {@link InputStream} used by the REST service in combination with the
- * SCMClient to read the value of a key from a sequence
- * of container chunks.  All bytes of the key value are stored in container
- * chunks.  Each chunk may contain multiple underlying {@link ByteBuffer}
- * instances.  This class encapsulates all state management for iterating
- * through the sequence of chunks and the sequence of buffers within each chunk.
- */
-public class ChunkInputStream extends InputStream implements Seekable {
-
-  private static final int EOF = -1;
-
-  private final BlockID blockID;
-  private final String traceID;
-  private XceiverClientManager xceiverClientManager;
-  private XceiverClientSpi xceiverClient;
-  private List<ChunkInfo> chunks;
-  private int chunkIndex;
-  private long[] chunkOffset;
-  private List<ByteBuffer> buffers;
-  private int bufferIndex;
-
-  /**
-   * Creates a new ChunkInputStream.
-   *
-   * @param blockID block ID of the chunk
-   * @param xceiverClientManager client manager that controls client
-   * @param xceiverClient client to perform container calls
-   * @param chunks list of chunks to read
-   * @param traceID container protocol call traceID
-   */
-  public ChunkInputStream(
-      BlockID blockID, XceiverClientManager xceiverClientManager,
-      XceiverClientSpi xceiverClient, List<ChunkInfo> chunks, String traceID) {
-    this.blockID = blockID;
-    this.traceID = traceID;
-    this.xceiverClientManager = xceiverClientManager;
-    this.xceiverClient = xceiverClient;
-    this.chunks = chunks;
-    this.chunkIndex = -1;
-    // chunkOffset[i] stores offset at which chunk i stores data in
-    // ChunkInputStream
-    this.chunkOffset = new long[this.chunks.size()];
-    initializeChunkOffset();
-    this.buffers = null;
-    this.bufferIndex = 0;
-  }
-
-  private void initializeChunkOffset() {
-    int tempOffset = 0;
-    for (int i = 0; i < chunks.size(); i++) {
-      chunkOffset[i] = tempOffset;
-      tempOffset += chunks.get(i).getLen();
-    }
-  }
-
-  @Override
-  public synchronized int read()
-      throws IOException {
-    checkOpen();
-    int available = prepareRead(1);
-    return available == EOF ? EOF :
-        Byte.toUnsignedInt(buffers.get(bufferIndex).get());
-  }
-
-  @Override
-  public synchronized int read(byte[] b, int off, int len) throws IOException {
-    // According to the JavaDocs for InputStream, it is recommended that
-    // subclasses provide an override of bulk read if possible for performance
-    // reasons.  In addition to performance, we need to do it for correctness
-    // reasons.  The Ozone REST service uses PipedInputStream and
-    // PipedOutputStream to relay HTTP response data between a Jersey thread and
-    // a Netty thread.  It turns out that PipedInputStream/PipedOutputStream
-    // have a subtle dependency (bug?) on the wrapped stream providing separate
-    // implementations of single-byte read and bulk read.  Without this, get key
-    // responses might close the connection before writing all of the bytes
-    // advertised in the Content-Length.
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return 0;
-    }
-    checkOpen();
-    int total = 0;
-    while (len > 0) {
-      int available = prepareRead(len);
-      if (available == EOF) {
-        return total != 0 ? total : EOF;
-      }
-      buffers.get(bufferIndex).get(b, off + total, available);
-      len -= available;
-      total += available;
-    }
-    return total;
-  }
-
-  @Override
-  public synchronized void close() {
-    if (xceiverClientManager != null && xceiverClient != null) {
-      xceiverClientManager.releaseClient(xceiverClient);
-      xceiverClientManager = null;
-      xceiverClient = null;
-    }
-  }
-
-  /**
-   * Checks if the stream is open.  If not, throws an exception.
-   *
-   * @throws IOException if stream is closed
-   */
-  private synchronized void checkOpen() throws IOException {
-    if (xceiverClient == null) {
-      throw new IOException("ChunkInputStream has been closed.");
-    }
-  }
-
-  /**
-   * Prepares to read by advancing through chunks and buffers as needed until it
-   * finds data to return or encounters EOF.
-   *
-   * @param len desired length of data to read
-   * @return length of data available to read, possibly less than desired length
-   */
-  private synchronized int prepareRead(int len) throws IOException {
-    for (;;) {
-      if (chunks == null || chunks.isEmpty()) {
-        // This must be an empty key.
-        return EOF;
-      } else if (buffers == null) {
-        // The first read triggers fetching the first chunk.
-        readChunkFromContainer();
-      } else if (!buffers.isEmpty() &&
-          buffers.get(bufferIndex).hasRemaining()) {
-        // Data is available from the current buffer.
-        ByteBuffer bb = buffers.get(bufferIndex);
-        return len > bb.remaining() ? bb.remaining() : len;
-      } else if (!buffers.isEmpty() &&
-          !buffers.get(bufferIndex).hasRemaining() &&
-          bufferIndex < buffers.size() - 1) {
-        // There are additional buffers available.
-        ++bufferIndex;
-      } else if (chunkIndex < chunks.size() - 1) {
-        // There are additional chunks available.
-        readChunkFromContainer();
-      } else {
-        // All available input has been consumed.
-        return EOF;
-      }
-    }
-  }
-
-  /**
-   * Attempts to read the chunk at the specified offset in the chunk list.  If
-   * successful, then the data of the read chunk is saved so that its bytes can
-   * be returned from subsequent read calls.
-   *
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  private synchronized void readChunkFromContainer() throws IOException {
-    // chunkIndex is incremented on every chunk read so that the next chunk
-    // is fetched.
-    chunkIndex += 1;
-    final ReadChunkResponseProto readChunkResponse;
-    final ChunkInfo chunkInfo = chunks.get(chunkIndex);
-    try {
-      readChunkResponse = ContainerProtocolCalls
-          .readChunk(xceiverClient, chunkInfo, blockID, traceID);
-    } catch (IOException e) {
-      throw new IOException("Unexpected OzoneException: " + e.toString(), e);
-    }
-    ByteString byteString = readChunkResponse.getData();
-    if (byteString.size() != chunkInfo.getLen()) {
-      // Bytes read from chunk should be equal to chunk size.
-      throw new IOException(String
-          .format("Inconsistent read for chunk=%s len=%d bytesRead=%d",
-              chunkInfo.getChunkName(), chunkInfo.getLen(), byteString.size()));
-    }
-    buffers = byteString.asReadOnlyByteBufferList();
-    bufferIndex = 0;
-  }
-
-  @Override
-  public synchronized void seek(long pos) throws IOException {
-    if (pos < 0 || (chunks.size() == 0 && pos > 0)
-        || pos >= chunkOffset[chunks.size() - 1] + chunks.get(chunks.size() - 1)
-        .getLen()) {
-      throw new EOFException("EOF encountered pos: " + pos + " container key: "
-          + blockID.getLocalID());
-    }
-    if (chunkIndex == -1) {
-      chunkIndex = Arrays.binarySearch(chunkOffset, pos);
-    } else if (pos < chunkOffset[chunkIndex]) {
-      chunkIndex = Arrays.binarySearch(chunkOffset, 0, chunkIndex, pos);
-    } else if (pos >= chunkOffset[chunkIndex] + chunks.get(chunkIndex)
-        .getLen()) {
-      chunkIndex =
-          Arrays.binarySearch(chunkOffset, chunkIndex + 1, chunks.size(), pos);
-    }
-    if (chunkIndex < 0) {
-      // Binary search returns -(insertionPoint) - 1 if the element is not
-      // present in the array, where insertionPoint is the index at which
-      // the element would be inserted. Adjust chunkIndex so that
-      // chunkIndex = insertionPoint - 1.
-      chunkIndex = -chunkIndex - 2;
-    }
-    // adjust chunkIndex so that readChunkFromContainer reads the correct chunk
-    chunkIndex -= 1;
-    readChunkFromContainer();
-    adjustBufferIndex(pos);
-  }
-
-  private void adjustBufferIndex(long pos) {
-    long tempOffset = chunkOffset[chunkIndex];
-    for (int i = 0; i < buffers.size(); i++) {
-      if (pos - tempOffset >= buffers.get(i).capacity()) {
-        tempOffset += buffers.get(i).capacity();
-      } else {
-        bufferIndex = i;
-        break;
-      }
-    }
-    buffers.get(bufferIndex).position((int) (pos - tempOffset));
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    return chunkIndex == -1 ? 0 :
-        chunkOffset[chunkIndex] + buffers.get(bufferIndex).position();
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-}
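The seek() arithmetic above is subtle, so a worked example may help: Arrays.binarySearch returns -(insertionPoint) - 1 when the key is absent, and the code maps that back to the chunk containing pos. The offsets below are illustrative:

    // chunkOffset = {0, 100, 250} and seek(pos = 120):
    long[] chunkOffset = {0L, 100L, 250L};
    int ret = java.util.Arrays.binarySearch(chunkOffset, 120L); // -(2) - 1 = -3
    int chunkIndex = -ret - 2;                                  // insertionPoint - 1 = 1
    // Chunk 1 starts at offset 100 and therefore contains position 120.
    // seek() then decrements chunkIndex once more so that
    // readChunkFromContainer() re-increments it back to 1 while fetching data.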

+ 0 - 236
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java

@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
-    .putBlock;
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
-    .writeChunk;
-
-/**
- * An {@link OutputStream} used by the REST service in combination with the
- * SCMClient to write the value of a key to a sequence
- * of container chunks.  Writes are buffered locally and periodically written to
- * the container as a new chunk.  In order to preserve the semantics that
- * replacement of a pre-existing key is atomic, each instance of the stream has
- * an internal unique identifier.  This unique identifier and a monotonically
- * increasing chunk index form a composite key that is used as the chunk name.
- * After all data is written, a putBlock call creates or updates the
- * corresponding container block, and this call includes the full list of
- * chunks that make up the block data.  The list of chunks is updated all at
- * once.  Therefore, a concurrent reader can never see an intermediate state
- * in which chunks of data from different versions of the block data are
- * interleaved.
- * This class encapsulates all state management for buffering and writing
- * through to the container.
- */
-public class ChunkOutputStream extends OutputStream {
-
-  private final BlockID blockID;
-  private final String key;
-  private final String traceID;
-  private final BlockData.Builder containerBlockData;
-  private XceiverClientManager xceiverClientManager;
-  private XceiverClientSpi xceiverClient;
-  private ByteBuffer buffer;
-  private final String streamId;
-  private int chunkIndex;
-  private int chunkSize;
-
-  /**
-   * Creates a new ChunkOutputStream.
-   *
-   * @param blockID block ID
-   * @param key chunk key
-   * @param xceiverClientManager client manager that controls client
-   * @param xceiverClient client to perform container calls
-   * @param traceID container protocol call args
-   * @param chunkSize chunk size
-   */
-  public ChunkOutputStream(BlockID blockID, String key,
-       XceiverClientManager xceiverClientManager,
-       XceiverClientSpi xceiverClient, String traceID, int chunkSize) {
-    this.blockID = blockID;
-    this.key = key;
-    this.traceID = traceID;
-    this.chunkSize = chunkSize;
-    KeyValue keyValue = KeyValue.newBuilder()
-        .setKey("TYPE").setValue("KEY").build();
-    this.containerBlockData = BlockData.newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .addMetadata(keyValue);
-    this.xceiverClientManager = xceiverClientManager;
-    this.xceiverClient = xceiverClient;
-    this.buffer = ByteBuffer.allocate(chunkSize);
-    this.streamId = UUID.randomUUID().toString();
-    this.chunkIndex = 0;
-  }
-
-  public ByteBuffer getBuffer() {
-    return buffer;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    checkOpen();
-    int rollbackPosition = buffer.position();
-    int rollbackLimit = buffer.limit();
-    buffer.put((byte)b);
-    if (buffer.position() == chunkSize) {
-      flushBufferToChunk(rollbackPosition, rollbackLimit);
-    }
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len)
-      throws IOException {
-    if (b == null) {
-      throw new NullPointerException();
-    }
-    if ((off < 0) || (off > b.length) || (len < 0) ||
-        ((off + len) > b.length) || ((off + len) < 0)) {
-      throw new IndexOutOfBoundsException();
-    }
-    if (len == 0) {
-      return;
-    }
-    checkOpen();
-    while (len > 0) {
-      int writeLen = Math.min(chunkSize - buffer.position(), len);
-      int rollbackPosition = buffer.position();
-      int rollbackLimit = buffer.limit();
-      buffer.put(b, off, writeLen);
-      if (buffer.position() == chunkSize) {
-        flushBufferToChunk(rollbackPosition, rollbackLimit);
-      }
-      off += writeLen;
-      len -= writeLen;
-    }
-  }
-
-  @Override
-  public void flush() throws IOException {
-    checkOpen();
-    if (buffer.position() > 0) {
-      int rollbackPosition = buffer.position();
-      int rollbackLimit = buffer.limit();
-      flushBufferToChunk(rollbackPosition, rollbackLimit);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (xceiverClientManager != null && xceiverClient != null
-        && buffer != null) {
-      if (buffer.position() > 0) {
-        writeChunkToContainer();
-      }
-      try {
-        putBlock(xceiverClient, containerBlockData.build(), traceID);
-      } catch (IOException e) {
-        throw new IOException(
-            "Unexpected Storage Container Exception: " + e.toString(), e);
-      } finally {
-        cleanup();
-      }
-    }
-  }
-
-  public void cleanup() {
-    xceiverClientManager.releaseClient(xceiverClient);
-    xceiverClientManager = null;
-    xceiverClient = null;
-    buffer = null;
-  }
-
-  /**
-   * Checks if the stream is open.  If not, throws an exception.
-   *
-   * @throws IOException if stream is closed
-   */
-  private void checkOpen() throws IOException {
-    if (xceiverClient == null) {
-      throw new IOException("ChunkOutputStream has been closed.");
-    }
-  }
-
-  /**
-   * Attempts to flush buffered writes by writing a new chunk to the container.
-   * If successful, then clears the buffer to prepare to receive writes for a
-   * new chunk.
-   *
-   * @param rollbackPosition position to restore in buffer if write fails
-   * @param rollbackLimit limit to restore in buffer if write fails
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  private void flushBufferToChunk(int rollbackPosition,
-      int rollbackLimit) throws IOException {
-    boolean success = false;
-    try {
-      writeChunkToContainer();
-      success = true;
-    } finally {
-      if (success) {
-        buffer.clear();
-      } else {
-        buffer.position(rollbackPosition);
-        buffer.limit(rollbackLimit);
-      }
-    }
-  }
-
-  /**
-   * Writes buffered data as a new chunk to the container and saves chunk
-   * information to be used later in the putBlock call.
-   *
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  private void writeChunkToContainer() throws IOException {
-    buffer.flip();
-    ByteString data = ByteString.copyFrom(buffer);
-    ChunkInfo chunk = ChunkInfo
-        .newBuilder()
-        .setChunkName(
-            DigestUtils.md5Hex(key) + "_stream_"
-                + streamId + "_chunk_" + ++chunkIndex)
-        .setOffset(0)
-        .setLen(data.size())
-        .build();
-    try {
-      writeChunk(xceiverClient, chunk, blockID, data, traceID);
-    } catch (IOException e) {
-      throw new IOException(
-          "Unexpected Storage Container Exception: " + e.toString(), e);
-    }
-    containerBlockData.addChunks(chunk);
-  }
-}
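A short usage sketch of the buffering contract described above; `out` is assumed to be an already-constructed ChunkOutputStream with chunkSize = 4 MB:

    byte[] payload = new byte[8 * 1024 * 1024]; // 8 MB of data
    out.write(payload, 0, payload.length);      // fills the buffer twice: two 4 MB chunks written
    out.write(42);                              // buffered locally, no chunk written yet
    out.close();                                // flushes the partial chunk, then putBlock
                                                // commits the full chunk list atomically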

+ 0 - 23
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * Low level IO streams to upload/download chunks from container service.
- */

+ 0 - 28
hadoop-hdds/common/dev-support/findbugsExcludeFile.xml

@@ -1,28 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
-  </Match>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.datanode.proto"/>
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.hdds.cli.GenericCli"></Class>
-    <Bug pattern="DM_EXIT" />
-  </Match>
-</FindBugsFilter>

+ 0 - 250
hadoop-hdds/common/pom.xml

@@ -1,250 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-common</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Common</description>
-  <name>Apache Hadoop HDDS Common</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <hdds.version>0.3.0-SNAPSHOT</hdds.version>
-    <log4j2.version>2.11.0</log4j2.version>
-    <disruptor.version>3.4.2</disruptor.version>
-    <declared.hdds.version>${hdds.version}</declared.hdds.version>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.fusesource.leveldbjni</groupId>
-      <artifactId>leveldbjni-all</artifactId>
-    </dependency>
-
-    <dependency>
-      <artifactId>ratis-server</artifactId>
-      <groupId>org.apache.ratis</groupId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>io.dropwizard.metrics</groupId>
-          <artifactId>metrics-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-netty</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-grpc</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.errorprone</groupId>
-      <artifactId>error_prone_annotations</artifactId>
-      <version>2.2.0</version>
-      <optional>true</optional>
-    </dependency>
-
-    <dependency>
-      <groupId>org.rocksdb</groupId>
-      <artifactId>rocksdbjni</artifactId>
-      <version>5.14.2</version>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-api</artifactId>
-      <version>${log4j2.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.logging.log4j</groupId>
-      <artifactId>log4j-core</artifactId>
-      <version>${log4j2.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>com.lmax</groupId>
-      <artifactId>disruptor</artifactId>
-      <version>${disruptor.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-pool2</artifactId>
-      <version>2.6.0</version>
-    </dependency>
-
-  </dependencies>
-
-  <build>
-    <resources>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <excludes>
-          <exclude>hdds-version-info.properties</exclude>
-        </excludes>
-        <filtering>false</filtering>
-      </resource>
-      <resource>
-        <directory>${basedir}/src/main/resources</directory>
-        <includes>
-          <include>hdds-version-info.properties</include>
-        </includes>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
-    <extensions>
-      <extension>
-        <groupId>kr.motd.maven</groupId>
-        <artifactId>os-maven-plugin</artifactId>
-        <version>${os-maven-plugin.version}</version>
-      </extension>
-    </extensions>
-    <plugins>
-      <plugin>
-        <groupId>org.xolstice.maven.plugins</groupId>
-        <artifactId>protobuf-maven-plugin</artifactId>
-        <version>${protobuf-maven-plugin.version}</version>
-        <extensions>true</extensions>
-        <configuration>
-          <protocArtifact>
-            com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
-          </protocArtifact>
-          <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
-          <includes>
-            <include>DatanodeContainerProtocol.proto</include>
-          </includes>
-          <outputDirectory>target/generated-sources/java</outputDirectory>
-          <clearOutputDirectory>false</clearOutputDirectory>
-        </configuration>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-              <goals>
-                <goal>compile</goal>
-                <goal>test-compile</goal>
-                <goal>compile-custom</goal>
-                <goal>test-compile-custom</goal>
-              </goals>
-              <configuration>
-                <pluginId>grpc-java</pluginId>
-                <pluginArtifact>
-                  io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
-                </pluginArtifact>
-              </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>generate-sources</phase>
-            <configuration>
-              <tasks>
-                <replace token="com.google.protobuf" value="org.apache.ratis.shaded.com.google.protobuf"
-                  dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
-                </replace>
-                <replace token="io.grpc" value="org.apache.ratis.shaded.io.grpc"
-                  dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
-                </replace>
-              </tasks>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>version-info</id>
-            <phase>generate-resources</phase>
-            <goals>
-              <goal>version-info</goal>
-            </goals>
-            <configuration>
-              <source>
-                <directory>${basedir}/../</directory>
-                <includes>
-                  <include>*/src/main/java/**/*.java</include>
-                  <include>*/src/main/proto/*.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>
-                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
-                </param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>StorageContainerLocationProtocol.proto</include>
-                  <include>hdds.proto</include>
-                  <include>ScmBlockLocationProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>

+ 0 - 157
hadoop-hdds/common/src/main/conf/log4j.properties

@@ -1,157 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5GB.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to log normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=DEBUG
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-
-# Custom Logging levels
-# AWS SDK & S3A FileSystem
-#log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into the datanode logs. Remove this line to have a single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
- %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
-
-# Log levels of third-party libraries
-log4j.logger.org.apache.commons.beanutils=WARN
-
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR

+ 0 - 97
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java

@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import org.apache.hadoop.utils.db.DBProfile;
-
-/**
- * This class contains constants for configuration keys and default values
- * used in hdds.
- */
-public final class HddsConfigKeys {
-
-  /**
-   * Do not instantiate.
-   */
-  private HddsConfigKeys() {
-  }
-
-  public static final String HDDS_HEARTBEAT_INTERVAL =
-      "hdds.heartbeat.interval";
-  public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
-      "30s";
-
-  public static final String HDDS_NODE_REPORT_INTERVAL =
-      "hdds.node.report.interval";
-  public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
-      "60s";
-
-  public static final String HDDS_CONTAINER_REPORT_INTERVAL =
-      "hdds.container.report.interval";
-  public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
-      "60s";
-
-  public static final String HDDS_PIPELINE_REPORT_INTERVAL =
-          "hdds.pipeline.report.interval";
-  public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT =
-          "60s";
-
-  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
-      "hdds.command.status.report.interval";
-  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
-      "60s";
-
-  public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT =
-      "hdds.container.action.max.limit";
-  public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT =
-      20;
-
-  public static final String HDDS_PIPELINE_ACTION_MAX_LIMIT =
-      "hdds.pipeline.action.max.limit";
-  public static final int HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT =
-      20;
-
-  // Configuration key for the volume choosing policy.
-  public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
-      "hdds.datanode.volume.choosing.policy";
-
-  // DB profiles used by RocksDB instances.
-  public static final String HDDS_DB_PROFILE = "hdds.db.profile";
-  public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.SSD;
-
-  // Once a container's usage crosses this threshold, it is eligible for
-  // closing.
-  public static final String HDDS_CONTAINER_CLOSE_THRESHOLD =
-      "hdds.container.close.threshold";
-  public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
-
-  public static final String HDDS_SCM_CHILLMODE_ENABLED =
-      "hdds.scm.chillmode.enabled";
-  public static final boolean HDDS_SCM_CHILLMODE_ENABLED_DEFAULT = true;
-
-  // % of containers which should have at least one reported replica
-  // before SCM comes out of chill mode.
-  public static final String HDDS_SCM_CHILLMODE_THRESHOLD_PCT =
-      "hdds.scm.chillmode.threshold.pct";
-  public static final double HDDS_SCM_CHILLMODE_THRESHOLD_PCT_DEFAULT = 0.99;
-
-  public static final String HDDS_LOCK_MAX_CONCURRENCY =
-      "hdds.lock.max.concurrency";
-  public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
-
-}
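These constants are read through Hadoop's Configuration API; a minimal sketch, assuming an OzoneConfiguration (defined elsewhere in this tree) or any Configuration that has loaded ozone-site.xml:

    Configuration conf = new OzoneConfiguration();
    long heartbeatMillis = conf.getTimeDuration(
        HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
        HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT,  // "30s"
        TimeUnit.MILLISECONDS);                          // -> 30000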

+ 0 - 53
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * HDDS Id generator.
- */
-public final class HddsIdFactory {
-  private HddsIdFactory() {
-  }
-
-  private static final AtomicLong LONG_COUNTER = new AtomicLong(
-      System.currentTimeMillis());
-
-  /**
-   * Returns an incrementing long. This class does not persist the
-   * counter's initial value, so ids generated after a restart may
-   * collide with ids generated before it.
-   *
-   * @return long
-   */
-  public static long getLongId() {
-    return LONG_COUNTER.incrementAndGet();
-  }
-
-  /**
-   * Returns a uuid.
-   *
-   * @return UUID.
-   */
-  public static UUID getUUId() {
-    return UUID.randomUUID();
-  }
-
-}
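
A short usage sketch of the factory removed above, illustrating the restart
caveat called out in its javadoc; the demo class is hypothetical:

import java.util.UUID;
import org.apache.hadoop.hdds.HddsIdFactory;

public final class IdFactoryDemo {
  public static void main(String[] args) {
    long first = HddsIdFactory.getLongId();  // seeded from currentTimeMillis()
    long second = HddsIdFactory.getLongId(); // first + 1 within this JVM
    UUID uuid = HddsIdFactory.getUUId();     // random, safe across restarts
    // The long counter is not persisted, so a restarted process may hand out
    // values that overlap with ids issued before the restart.
    System.out.println(first + " " + second + " " + uuid);
  }
}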

+ 0 - 351
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java

@@ -1,351 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Strings;
-import com.google.common.net.HostAndPort;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.file.Paths;
-import java.util.Collection;
-import java.util.HashSet;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys
-    .DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
-
-/**
- * HDDS specific stateless utility functions.
- */
-public final class HddsUtils {
-
-
-  private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
-
-  /**
-   * The service ID of the solitary Ozone SCM service.
-   */
-  public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
-  public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
-      "OzoneScmServiceInstance";
-
-  private static final int NO_PORT = -1;
-
-  private HddsUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
-              + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
-              + "details"
-              + " on configuring Ozone.");
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM for block service. If
-   * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   * @throws IllegalArgumentException if configuration is not defined.
-   */
-  public static InetSocketAddress getScmAddressForBlockClients(
-      Configuration conf) {
-    Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      host = getHostNameFromConfigKeys(conf,
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-      if (!host.isPresent()) {
-        throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
-                + " must be defined. See"
-                + " https://wiki.apache.org/hadoop/Ozone#Configuration"
-                + " for details on configuring Ozone.");
-      }
-    }
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(host.get() + ":" + port
-        .or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the hostname, trying the supplied config keys in order.
-   * Each config value may be absent; if present, it must be in the
-   * format host:port (the :port part is optional).
-   *
-   * @param conf  - Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first hostname component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
-      String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<String> hostName = getHostName(value);
-      if (hostName.isPresent()) {
-        return hostName;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Gets the hostname, or indicates that it is absent.
-   * @param value host or host:port
-   * @return hostname
-   */
-  public static Optional<String> getHostName(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    return Optional.of(HostAndPort.fromString(value).getHostText());
-  }
-
-  /**
-   * Gets the port if there is one, or absent otherwise.
-   * @param value  String in host:port format.
-   * @return Port
-   */
-  public static Optional<Integer> getHostPort(String value) {
-    if ((value == null) || value.isEmpty()) {
-      return Optional.absent();
-    }
-    int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
-    if (port == NO_PORT) {
-      return Optional.absent();
-    } else {
-      return Optional.of(port);
-    }
-  }
-
-  /**
-   * Retrieve the port number, trying the supplied config keys in order.
-   * Each config value may be absent; if present, it must be in the
-   * format host:port (the :port part is optional).
-   *
-   * @param conf Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first port number component found from the given keys, or absent.
-   * @throws IllegalArgumentException if any values are not in the 'host'
-   *             or host:port format.
-   */
-  public static Optional<Integer> getPortNumberFromConfigKeys(
-      Configuration conf, String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      final Optional<Integer> hostPort = getHostPort(value);
-      if (hostPort.isPresent()) {
-        return hostPort;
-      }
-    }
-    return Optional.absent();
-  }
-
-  /**
-   * Retrieve the socket addresses of all storage container managers.
-   *
-   * @param conf
-   * @return A collection of SCM addresses
-   * @throws IllegalArgumentException If the configuration is invalid
-   */
-  public static Collection<InetSocketAddress> getSCMAddresses(
-      Configuration conf) throws IllegalArgumentException {
-    Collection<InetSocketAddress> addresses =
-        new HashSet<InetSocketAddress>();
-    Collection<String> names =
-        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
-    if (names == null || names.isEmpty()) {
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
-          + " need to be a set of valid DNS names or IP addresses."
-          + " Null or empty address list found.");
-    }
-
-    final com.google.common.base.Optional<Integer>
-        defaultPort =  com.google.common.base.Optional.of(ScmConfigKeys
-        .OZONE_SCM_DEFAULT_PORT);
-    for (String address : names) {
-      com.google.common.base.Optional<String> hostname =
-          getHostName(address);
-      if (!hostname.isPresent()) {
-        throw new IllegalArgumentException("Invalid hostname for SCM: "
-            + hostname);
-      }
-      com.google.common.base.Optional<Integer> port =
-          getHostPort(address);
-      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
-          port.or(defaultPort.get()));
-      addresses.add(addr);
-    }
-    return addresses;
-  }
-
-  public static boolean isHddsEnabled(Configuration conf) {
-    String securityEnabled =
-        conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-            "simple");
-    boolean securityAuthorizationEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
-    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
-      LOG.error("Ozone is not supported in a security enabled cluster. ");
-      return false;
-    } else {
-      return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
-    }
-  }
-
-
-  /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIdFilePath(Configuration conf) {
-    String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-    if (dataNodeIDPath == null) {
-      String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-      if (Strings.isNullOrEmpty(metaPath)) {
-        // This means the metadata directory was not found; in theory this
-        // should not happen here because we should have failed earlier.
-        throw new IllegalArgumentException("Unable to locate meta data " +
-            "directory when getting datanode id path");
-      }
-      dataNodeIDPath = Paths.get(metaPath,
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
-    }
-    return dataNodeIDPath;
-  }
-
-  /**
-   * Returns the hostname for this datanode. If the hostname is not
-   * explicitly configured in the given config, then it is determined
-   * via the DNS class.
-   *
-   * @param conf Configuration
-   *
-   * @return the hostname (NB: may not be a FQDN)
-   * @throws UnknownHostException if the dfs.datanode.dns.interface
-   *    option is used and the hostname can not be determined
-   */
-  public static String getHostName(Configuration conf)
-      throws UnknownHostException {
-    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
-    if (name == null) {
-      String dnsInterface = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
-      String nameServer = conf.get(
-          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
-      boolean fallbackToHosts = false;
-
-      if (dnsInterface == null) {
-        // Try the legacy configuration keys.
-        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
-        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
-      } else {
-        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
-        // resolution if DNS fails. We will not use hosts file resolution
-        // by default to avoid breaking existing clusters.
-        fallbackToHosts = true;
-      }
-
-      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
-    }
-    return name;
-  }
-
-  /**
-   * Checks if the container command is read only or not.
-   * @param proto ContainerCommand Request proto
-   * @return true if it is read-only, false otherwise.
-   */
-  public static boolean isReadOnly(
-      ContainerProtos.ContainerCommandRequestProto proto) {
-    switch (proto.getCmdType()) {
-    case ReadContainer:
-    case ReadChunk:
-    case ListBlock:
-    case GetBlock:
-    case GetSmallFile:
-    case ListContainer:
-    case ListChunk:
-    case GetCommittedBlockLength:
-      return true;
-    case CloseContainer:
-    case WriteChunk:
-    case UpdateContainer:
-    case CompactChunk:
-    case CreateContainer:
-    case DeleteChunk:
-    case DeleteContainer:
-    case DeleteBlock:
-    case PutBlock:
-    case PutSmallFile:
-    default:
-      return false;
-    }
-  }
-
-}
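
For reference, a sketch of resolving the SCM client endpoint with the utility
removed above. The key string and host are illustrative assumptions (the key
constant itself lives in ScmConfigKeys):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;

public final class ScmAddressDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed value of OZONE_SCM_CLIENT_ADDRESS_KEY; the :port part is
    // optional and falls back to OZONE_SCM_CLIENT_PORT_DEFAULT when absent.
    conf.set("ozone.scm.client.address", "scm.example.com");
    InetSocketAddress scm = HddsUtils.getScmAddressForClients(conf);
    System.out.println(scm);
    // With no host configured at all, the call throws
    // IllegalArgumentException rather than falling back to a default host.
  }
}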

+ 0 - 100
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java

@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import com.google.common.annotations.VisibleForTesting;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.RunLast;
-
-/**
- * This is a generic parent class for all the Ozone-related CLI tools.
- */
-public class GenericCli implements Callable<Void>, GenericParentCommand {
-
-  @Option(names = {"--verbose"},
-      description = "More verbose output. Show the stack trace of the errors.")
-  private boolean verbose;
-
-  @Option(names = {"-D", "--set"})
-  private Map<String, String> configurationOverrides = new HashMap<>();
-
-  private final CommandLine cmd;
-
-  public GenericCli() {
-    cmd = new CommandLine(this);
-  }
-
-  public void run(String[] argv) {
-    try {
-      execute(argv);
-    } catch (ExecutionException ex) {
-      printError(ex.getCause());
-      System.exit(-1);
-    }
-  }
-
-  @VisibleForTesting
-  public void execute(String[] argv) {
-    cmd.parseWithHandler(new RunLast(), argv);
-  }
-
-  private void printError(Throwable error) {
-    if (verbose) {
-      error.printStackTrace(System.err);
-    } else {
-      System.err.println(error.getMessage().split("\n")[0]);
-    }
-    if(error instanceof MissingSubcommandException){
-      System.err.println(((MissingSubcommandException) error).getUsage());
-    }
-  }
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(cmd.getUsageMessage());
-  }
-
-  public OzoneConfiguration createOzoneConfiguration() {
-    OzoneConfiguration ozoneConf = new OzoneConfiguration();
-    if (configurationOverrides != null) {
-      for (Entry<String, String> entry : configurationOverrides.entrySet()) {
-        ozoneConf
-            .set(entry.getKey(), entry.getValue());
-      }
-    }
-    return ozoneConf;
-  }
-
-  @VisibleForTesting
-  public picocli.CommandLine getCmd() {
-    return cmd;
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return verbose;
-  }
-}
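
A hedged sketch of how a tool would subclass the removed GenericCli; the
command name and the printed key are illustrative:

import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import picocli.CommandLine.Command;

// Hypothetical tool: inherits --verbose and the -D/--set overrides for free.
@Command(name = "example-tool", description = "GenericCli wiring demo")
public class ExampleTool extends GenericCli {

  public static void main(String[] args) {
    new ExampleTool().run(args);
  }

  @Override
  public Void call() throws Exception {
    // "-D ozone.enabled=true" on the command line lands in this config.
    OzoneConfiguration conf = createOzoneConfiguration();
    System.out.println("ozone.enabled = " + conf.get("ozone.enabled"));
    return null;
  }
}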

+ 0 - 25
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java

@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-/**
- * Interface to access the higher level parameters.
- */
-public interface GenericParentCommand {
-
-  boolean isVerbose();
-}

+ 0 - 35
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java

@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.utils.HddsVersionInfo;
-
-import picocli.CommandLine.IVersionProvider;
-
-/**
- * Version provider for the CLI interface.
- */
-public class HddsVersionProvider implements IVersionProvider {
-  @Override
-  public String[] getVersion() throws Exception {
-    String[] result = new String[] {
-        HddsVersionInfo.getBuildVersion()
-    };
-    return result;
-  }
-}
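
The provider plugs into picocli's standard version support; a minimal,
hypothetical wiring that mirrors how the removed CLI classes use picocli:

import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.RunLast;

@Command(name = "versioned-demo",
    versionProvider = HddsVersionProvider.class,
    mixinStandardHelpOptions = true) // contributes --help and --version
public class VersionedDemo implements Runnable {
  @Override
  public void run() {
    // "versioned-demo --version" prints HddsVersionInfo.getBuildVersion().
  }

  public static void main(String[] args) {
    new CommandLine(new VersionedDemo()).parseWithHandler(new RunLast(), args);
  }
}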

+ 0 - 35
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java

@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-/**
- * Exception to throw if a subcommand is required but not selected.
- */
-public class MissingSubcommandException extends RuntimeException {
-
-  private String usage;
-
-  public MissingSubcommandException(String usage) {
-    super("Incomplete command");
-    this.usage = usage;
-  }
-
-  public String getUsage() {
-    return usage;
-  }
-}

+ 0 - 22
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic helper classes to instantiate picocli-based CLI tools.
- */
-package org.apache.hadoop.hdds.cli;

+ 0 - 90
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java

@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.commons.lang3.builder.ToStringBuilder;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.Objects;
-
-/**
- * BlockID of Ozone (containerID, localID).
- */
-public class BlockID {
-  private long containerID;
-  private long localID;
-
-  public BlockID(long containerID, long localID) {
-    this.containerID = containerID;
-    this.localID = localID;
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public long getLocalID() {
-    return localID;
-  }
-
-  @Override
-  public String toString() {
-    return new ToStringBuilder(this).
-        append("containerID", containerID).
-        append("localID", localID).
-        toString();
-  }
-
-  public HddsProtos.BlockID getProtobuf() {
-    return HddsProtos.BlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
-  }
-
-  public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
-    return ContainerProtos.DatanodeBlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static BlockID getFromProtobuf(
-      ContainerProtos.DatanodeBlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BlockID blockID = (BlockID) o;
-    return containerID == blockID.containerID && localID == blockID.localID;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(containerID, localID);
-  }
-}
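
A round-trip sketch for the removed BlockID, exercising the protobuf
conversion and the value-based equals/hashCode shown above; the ids are
arbitrary:

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class BlockIdRoundTrip {
  public static void main(String[] args) {
    BlockID original = new BlockID(42L, 7L);
    HddsProtos.BlockID wire = original.getProtobuf();
    BlockID copy = BlockID.getFromProtobuf(wire);
    // equals/hashCode are defined over (containerID, localID), so the
    // round-tripped copy compares equal to the original.
    System.out.println(original.equals(copy));                  // true
    System.out.println(original.hashCode() == copy.hashCode()); // true
  }
}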

+ 0 - 203
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java

@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-
-/**
- * Represents an OzoneQuota object that can be applied to
- * a storage volume.
- */
-public class OzoneQuota {
-
-  public static final String OZONE_QUOTA_BYTES = "BYTES";
-  public static final String OZONE_QUOTA_MB = "MB";
-  public static final String OZONE_QUOTA_GB = "GB";
-  public static final String OZONE_QUOTA_TB = "TB";
-
-  private Units unit;
-  private long size;
-
-  /** Quota Units.*/
-  public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
-
-  /**
-   * Returns size.
-   *
-   * @return long
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Returns Units.
-   *
-   * @return the quota unit (BYTES, KB, MB, GB or TB)
-   */
-  public Units getUnit() {
-    return unit;
-  }
-
-  /**
-   * Constructs a default Quota object.
-   */
-  public OzoneQuota() {
-    this.size = 0;
-    this.unit = Units.UNDEFINED;
-  }
-
-  /**
-   * Constructor for Ozone Quota.
-   *
-   * @param size Long Size
-   * @param unit MB, GB or TB
-   */
-  public OzoneQuota(long size, Units unit) {
-    this.size = size;
-    this.unit = unit;
-  }
-
-  /**
-   * Formats a quota as a string.
-   *
-   * @param quota the quota to format
-   * @return string representation of quota
-   */
-  public static String formatQuota(OzoneQuota quota) {
-    return String.valueOf(quota.size) + quota.unit;
-  }
-
-  /**
-   * Parses a user-provided string and returns the
-   * Quota object.
-   *
-   * @param quotaString Quota String
-   *
-   * @return OzoneQuota object
-   *
-   * @throws IllegalArgumentException
-   */
-  public static OzoneQuota parseQuota(String quotaString)
-      throws IllegalArgumentException {
-
-    if ((quotaString == null) || (quotaString.isEmpty())) {
-      throw new IllegalArgumentException(
-          "Quota string cannot be null or empty.");
-    }
-
-    String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
-    String size = "";
-    int nSize;
-    Units currUnit = Units.MB;
-    boolean found = false;
-    if (uppercase.endsWith(OZONE_QUOTA_MB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_MB.length());
-      currUnit = Units.MB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_GB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_GB.length());
-      currUnit = Units.GB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_TB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_TB.length());
-      currUnit = Units.TB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(OZONE_QUOTA_BYTES)) {
-      size = uppercase
-          .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length());
-      currUnit = Units.BYTES;
-      found = true;
-    }
-
-    if (!found) {
-      throw new IllegalArgumentException(
-          "Quota unit not recognized. Supported values are BYTES, MB, GB and " +
-              "TB.");
-    }
-
-    nSize = Integer.parseInt(size);
-    if (nSize < 0) {
-      throw new IllegalArgumentException("Quota cannot be negative.");
-    }
-
-    return new OzoneQuota(nSize, currUnit);
-  }
-
-
-  /**
-   * Returns size in Bytes or -1 if there is no Quota.
-   */
-  public long sizeInBytes() {
-    switch (this.unit) {
-    case BYTES:
-      return this.getSize();
-    case MB:
-      return this.getSize() * OzoneConsts.MB;
-    case GB:
-      return this.getSize() * OzoneConsts.GB;
-    case TB:
-      return this.getSize() * OzoneConsts.TB;
-    case UNDEFINED:
-    default:
-      return -1;
-    }
-  }
-
-  /**
-   * Returns OzoneQuota corresponding to size in bytes.
-   *
-   * @param sizeInBytes size in bytes to be converted
-   *
-   * @return OzoneQuota object
-   */
-  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
-    long size;
-    Units unit;
-    if (sizeInBytes % OzoneConsts.TB == 0) {
-      size = sizeInBytes / OzoneConsts.TB;
-      unit = Units.TB;
-    } else if (sizeInBytes % OzoneConsts.GB == 0) {
-      size = sizeInBytes / OzoneConsts.GB;
-      unit = Units.GB;
-    } else if (sizeInBytes % OzoneConsts.MB == 0) {
-      size = sizeInBytes / OzoneConsts.MB;
-      unit = Units.MB;
-    } else {
-      size = sizeInBytes;
-      unit = Units.BYTES;
-    }
-    return new OzoneQuota((int)size, unit);
-  }
-
-  @Override
-  public String toString() {
-    return size + " " + unit;
-  }
-}
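
A quick sketch of the parsing and conversion paths removed above; the input
string is arbitrary:

import org.apache.hadoop.hdds.client.OzoneQuota;

public final class QuotaDemo {
  public static void main(String[] args) {
    // Parsing is case-insensitive and strips whitespace: "10 gb" works too.
    OzoneQuota quota = OzoneQuota.parseQuota("10GB");
    long bytes = quota.sizeInBytes();               // 10 * OzoneConsts.GB
    // getOzoneQuota picks the largest unit that divides the size exactly.
    OzoneQuota back = OzoneQuota.getOzoneQuota(bytes);
    System.out.println(quota + " -> " + bytes + " -> " + back);
    // A default-constructed quota is UNDEFINED and reports -1 bytes.
    System.out.println(new OzoneQuota().sizeInBytes());
  }
}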

+ 0 - 63
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java

@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * The replication factor to be used while writing key into ozone.
- */
-public enum ReplicationFactor {
-  ONE(1),
-  THREE(3);
-
-  /**
-   * Integer representation of replication.
-   */
-  private int value;
-
-  /**
-   * Initializes ReplicationFactor with value.
-   * @param value replication value
-   */
-  ReplicationFactor(int value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns enum value corresponding to the int value.
-   * @param value replication value
-   * @return ReplicationFactor
-   */
-  public static ReplicationFactor valueOf(int value) {
-    if(value == 1) {
-      return ONE;
-    }
-    if (value == 3) {
-      return THREE;
-    }
-    throw new IllegalArgumentException("Unsupported value: " + value);
-  }
-
-  /**
-   * Returns integer representation of ReplicationFactor.
-   * @return replication value
-   */
-  public int getValue() {
-    return value;
-  }
-}
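
Usage is straightforward; note that the int overload of valueOf rejects
anything other than 1 or 3:

import org.apache.hadoop.hdds.client.ReplicationFactor;

public final class FactorDemo {
  public static void main(String[] args) {
    ReplicationFactor three = ReplicationFactor.valueOf(3); // THREE
    System.out.println(three.getValue());                   // 3
    // ReplicationFactor.valueOf(2) would throw IllegalArgumentException.
  }
}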

+ 0 - 28
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java

@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * The replication type to be used while writing key into ozone.
- */
-public enum ReplicationType {
-    RATIS,
-    STAND_ALONE,
-    CHAINED
-}

+ 0 - 23
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * Base property types for HDDS containers and replications.
- */

+ 0 - 185
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java

@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import com.google.gson.Gson;
-import java.io.IOException;
-import java.io.Writer;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.HttpHeaders;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer2;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
-
-/**
- * A servlet to print out the running configuration data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class HddsConfServlet extends HttpServlet {
-
-  private static final long serialVersionUID = 1L;
-
-  protected static final String FORMAT_JSON = "json";
-  protected static final String FORMAT_XML = "xml";
-  private static final String COMMAND = "cmd";
-  private static final OzoneConfiguration OZONE_CONFIG =
-      new OzoneConfiguration();
-  private static final transient Logger LOG =
-      LoggerFactory.getLogger(HddsConfServlet.class);
-
-
-  /**
-   * Return the Configuration of the daemon hosting this servlet.
-   * This is populated when the HttpServer starts.
-   */
-  private Configuration getConfFromContext() {
-    Configuration conf = (Configuration) getServletContext().getAttribute(
-        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
-    assert conf != null;
-    return conf;
-  }
-
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws ServletException, IOException {
-
-    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
-        request, response)) {
-      return;
-    }
-
-    String format = parseAcceptHeader(request);
-    if (FORMAT_XML.equals(format)) {
-      response.setContentType("text/xml; charset=utf-8");
-    } else if (FORMAT_JSON.equals(format)) {
-      response.setContentType("application/json; charset=utf-8");
-    }
-
-    String name = request.getParameter("name");
-    Writer out = response.getWriter();
-    String cmd = request.getParameter(COMMAND);
-
-    processCommand(cmd, format, request, response, out, name);
-    out.close();
-  }
-
-  private void processCommand(String cmd, String format,
-      HttpServletRequest request, HttpServletResponse response, Writer out,
-      String name)
-      throws IOException {
-    try {
-      if (cmd == null) {
-        if (FORMAT_XML.equals(format)) {
-          response.setContentType("text/xml; charset=utf-8");
-        } else if (FORMAT_JSON.equals(format)) {
-          response.setContentType("application/json; charset=utf-8");
-        }
-
-        writeResponse(getConfFromContext(), out, format, name);
-      } else {
-        processConfigTagRequest(request, out);
-      }
-    } catch (BadFormatException bfe) {
-      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
-    } catch (IllegalArgumentException iae) {
-      response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  static String parseAcceptHeader(HttpServletRequest request) {
-    String format = request.getHeader(HttpHeaders.ACCEPT);
-    return format != null && format.contains(FORMAT_JSON) ?
-        FORMAT_JSON : FORMAT_XML;
-  }
-
-  /**
-   * Guts of the servlet - extracted for easy testing.
-   */
-  static void writeResponse(Configuration conf,
-      Writer out, String format, String propertyName)
-      throws IOException, IllegalArgumentException, BadFormatException {
-    if (FORMAT_JSON.equals(format)) {
-      Configuration.dumpConfiguration(conf, propertyName, out);
-    } else if (FORMAT_XML.equals(format)) {
-      conf.writeXml(propertyName, out);
-    } else {
-      throw new BadFormatException("Bad format: " + format);
-    }
-  }
-
-  public static class BadFormatException extends Exception {
-
-    private static final long serialVersionUID = 1L;
-
-    public BadFormatException(String msg) {
-      super(msg);
-    }
-  }
-
-  private void processConfigTagRequest(HttpServletRequest request,
-      Writer out) throws IOException {
-    String cmd = request.getParameter(COMMAND);
-    Gson gson = new Gson();
-    Configuration config = getOzoneConfig();
-
-    switch (cmd) {
-    case "getOzoneTags":
-      out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY)
-          .split(",")));
-      break;
-    case "getPropertyByTag":
-      String tags = request.getParameter("tags");
-      Map<String, Properties> propMap = new HashMap<>();
-
-      for (String tag : tags.split(",")) {
-        if (config.isPropertyTag(tag)) {
-          Properties properties = config.getAllPropertiesByTag(tag);
-          propMap.put(tag, properties);
-        } else {
-          LOG.debug("Not a valid tag: " + tag);
-        }
-      }
-      out.write(gson.toJsonTree(propMap).toString());
-      break;
-    default:
-      throw new IllegalArgumentException(cmd + " is not a valid command.");
-    }
-
-  }
-
-  private static Configuration getOzoneConfig() {
-    return OZONE_CONFIG;
-  }
-}
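
The static writeResponse helper is the testable core of the servlet removed
above; a sketch of driving it directly, assuming the caller sits in the same
package (the method is package-private):

// Assumed to live in org.apache.hadoop.hdds.conf for package-private access.
package org.apache.hadoop.hdds.conf;

import java.io.StringWriter;
import org.apache.hadoop.conf.Configuration;

public final class ConfServletDemo {
  public static void main(String[] args) throws Exception {
    StringWriter out = new StringWriter();
    // "json" dumps the whole configuration via dumpConfiguration, "xml"
    // selects conf.writeXml, and any other format raises BadFormatException.
    HddsConfServlet.writeResponse(new Configuration(), out, "json", null);
    System.out.println(out);
  }
}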

+ 0 - 162
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java

@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Configuration for ozone.
- */
-@InterfaceAudience.Private
-public class OzoneConfiguration extends Configuration {
-  static {
-    activate();
-  }
-
-  public OzoneConfiguration() {
-    OzoneConfiguration.activate();
-  }
-
-  public OzoneConfiguration(Configuration conf) {
-    super(conf);
-  }
-
-  public List<Property> readPropertyFromXml(URL url) throws JAXBException {
-    JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class);
-    Unmarshaller um = context.createUnmarshaller();
-
-    XMLConfiguration config = (XMLConfiguration) um.unmarshal(url);
-    return config.getProperties();
-  }
-
-  /**
-   * Class to marshall/un-marshall configuration from xml files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "configuration")
-  public static class XMLConfiguration {
-
-    @XmlElement(name = "property", type = Property.class)
-    private List<Property> properties = new ArrayList<>();
-
-    public XMLConfiguration() {
-    }
-
-    public XMLConfiguration(List<Property> properties) {
-      this.properties = properties;
-    }
-
-    public List<Property> getProperties() {
-      return properties;
-    }
-
-    public void setProperties(List<Property> properties) {
-      this.properties = properties;
-    }
-  }
-
-  /**
-   * Class to marshall/un-marshall configuration properties from xml files.
-   */
-  @XmlAccessorType(XmlAccessType.FIELD)
-  @XmlRootElement(name = "property")
-  public static class Property implements Comparable<Property> {
-
-    private String name;
-    private String value;
-    private String tag;
-    private String description;
-
-    public String getName() {
-      return name;
-    }
-
-    public void setName(String name) {
-      this.name = name;
-    }
-
-    public String getValue() {
-      return value;
-    }
-
-    public void setValue(String value) {
-      this.value = value;
-    }
-
-    public String getTag() {
-      return tag;
-    }
-
-    public void setTag(String tag) {
-      this.tag = tag;
-    }
-
-    public String getDescription() {
-      return description;
-    }
-
-    public void setDescription(String description) {
-      this.description = description;
-    }
-
-    @Override
-    public int compareTo(Property o) {
-      if (this == o) {
-        return 0;
-      }
-      return this.getName().compareTo(o.getName());
-    }
-
-    @Override
-    public String toString() {
-      return this.getName() + " " + this.getValue() + " " + this.getTag();
-    }
-
-    @Override
-    public int hashCode(){
-      return this.getName().hashCode();
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      return (obj instanceof Property) && (((Property) obj).getName())
-          .equals(this.getName());
-    }
-  }
-
-  public static void activate() {
-    // adds the default resources
-    Configuration.addDefaultResource("hdfs-default.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
-    Configuration.addDefaultResource("ozone-default.xml");
-    Configuration.addDefaultResource("ozone-site.xml");
-  }
-}
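
A sketch of the configuration bootstrap removed above; the override key is
illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class OzoneConfDemo {
  public static void main(String[] args) {
    // By the time the constructor returns, activate() has registered
    // hdfs-default.xml, hdfs-site.xml, ozone-default.xml and ozone-site.xml
    // as default resources, so those files are consulted on every get().
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.metadata.dirs", "/tmp/ozone-meta"); // illustrative key
    System.out.println(conf.get("ozone.metadata.dirs"));
  }
}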

+ 0 - 18
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java

@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;

+ 0 - 23
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-/**
- * Generic HDDS specific configurator and helper classes.
- */

+ 0 - 401
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java

@@ -1,401 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocol;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * DatanodeDetails class contains details about a DataNode, such as:
- * - UUID of the DataNode.
- * - IP and Hostname details.
- * - Port details to which the DataNode will be listening.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeDetails implements Comparable<DatanodeDetails> {
-
-  /**
-   * DataNode's unique identifier in the cluster.
-   */
-  private final UUID uuid;
-
-  private String ipAddress;
-  private String hostName;
-  private List<Port> ports;
-
-
-  /**
-   * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used
-   * for instantiating DatanodeDetails.
-   * @param uuid DataNode's UUID
-   * @param ipAddress IP Address of this DataNode
-   * @param hostName DataNode's hostname
-   * @param ports Ports used by the DataNode
-   */
-  private DatanodeDetails(String uuid, String ipAddress, String hostName,
-      List<Port> ports) {
-    this.uuid = UUID.fromString(uuid);
-    this.ipAddress = ipAddress;
-    this.hostName = hostName;
-    this.ports = ports;
-  }
-
-  protected DatanodeDetails(DatanodeDetails datanodeDetails) {
-    this.uuid = datanodeDetails.uuid;
-    this.ipAddress = datanodeDetails.ipAddress;
-    this.hostName = datanodeDetails.hostName;
-    this.ports = datanodeDetails.ports;
-  }
-
-  /**
-   * Returns the DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public UUID getUuid() {
-    return uuid;
-  }
-
-  /**
-   * Returns the string representation of DataNode UUID.
-   *
-   * @return UUID of DataNode
-   */
-  public String getUuidString() {
-    return uuid.toString();
-  }
-
-  /**
-   * Sets the IP address of Datanode.
-   *
-   * @param ip IP Address
-   */
-  public void setIpAddress(String ip) {
-    this.ipAddress = ip;
-  }
-
-  /**
-   * Returns IP address of DataNode.
-   *
-   * @return IP address
-   */
-  public String getIpAddress() {
-    return ipAddress;
-  }
-
-  /**
-   * Sets the Datanode hostname.
-   *
-   * @param host hostname
-   */
-  public void setHostName(String host) {
-    this.hostName = host;
-  }
-
-  /**
-   * Returns Hostname of DataNode.
-   *
-   * @return Hostname
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * Sets a DataNode Port.
-   *
-   * @param port DataNode port
-   */
-  public void setPort(Port port) {
-    // If the port is already in the list remove it first and add the
-    // new/updated port value.
-    ports.remove(port);
-    ports.add(port);
-  }
-
-  /**
-   * Returns all the Ports used by DataNode.
-   *
-   * @return DataNode Ports
-   */
-  public List<Port> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Given the name, returns the port number, or null if the requested
-   * port is not found.
-   *
-   * @param name Name of the port
-   *
-   * @return Port
-   */
-  public Port getPort(Port.Name name) {
-    for (Port port : ports) {
-      if (port.getName().equals(name)) {
-        return port;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns a DatanodeDetails from the protocol buffers.
-   *
-   * @param datanodeDetailsProto - protoBuf Message
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails getFromProtoBuf(
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
-    DatanodeDetails.Builder builder = newBuilder();
-    builder.setUuid(datanodeDetailsProto.getUuid());
-    if (datanodeDetailsProto.hasIpAddress()) {
-      builder.setIpAddress(datanodeDetailsProto.getIpAddress());
-    }
-    if (datanodeDetailsProto.hasHostName()) {
-      builder.setHostName(datanodeDetailsProto.getHostName());
-    }
-    for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) {
-      builder.addPort(newPort(
-          Port.Name.valueOf(port.getName().toUpperCase()), port.getValue()));
-    }
-    return builder.build();
-  }
-
-  /**
-   * Returns a DatanodeDetails protobuf message from a datanode ID.
-   * @return HddsProtos.DatanodeDetailsProto
-   */
-  public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
-    HddsProtos.DatanodeDetailsProto.Builder builder =
-        HddsProtos.DatanodeDetailsProto.newBuilder()
-            .setUuid(getUuidString());
-    if (ipAddress != null) {
-      builder.setIpAddress(ipAddress);
-    }
-    if (hostName != null) {
-      builder.setHostName(hostName);
-    }
-    for (Port port : ports) {
-      builder.addPorts(HddsProtos.Port.newBuilder()
-          .setName(port.getName().toString())
-          .setValue(port.getValue())
-          .build());
-    }
-    return builder.build();
-  }
-
-  @Override
-  public String toString() {
-    return uuid.toString() + "{" +
-        "ip: " +
-        ipAddress +
-        ", host: " +
-        hostName +
-        "}";
-  }
-
-  @Override
-  public int compareTo(DatanodeDetails that) {
-    return this.getUuid().compareTo(that.getUuid());
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return obj instanceof DatanodeDetails &&
-        uuid.equals(((DatanodeDetails) obj).uuid);
-  }
-
-  @Override
-  public int hashCode() {
-    return uuid.hashCode();
-  }
-
-  /**
-   * Returns DatanodeDetails.Builder instance.
-   *
-   * @return DatanodeDetails.Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder class for building DatanodeDetails.
-   */
-  public static final class Builder {
-    private String id;
-    private String ipAddress;
-    private String hostName;
-    private List<Port> ports;
-
-    /**
-     * Default private constructor. To create Builder instance use
-     * DatanodeDetails#newBuilder.
-     */
-    private Builder() {
-      ports = new ArrayList<>();
-    }
-
-    /**
-     * Sets the DatanodeUuid.
-     *
-     * @param uuid DatanodeUuid
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setUuid(String uuid) {
-      this.id = uuid;
-      return this;
-    }
-
-    /**
-     * Sets the IP address of DataNode.
-     *
-     * @param ip address
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setIpAddress(String ip) {
-      this.ipAddress = ip;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of DataNode.
-     *
-     * @param host hostname
-     * @return DatanodeDetails.Builder
-     */
-    public Builder setHostName(String host) {
-      this.hostName = host;
-      return this;
-    }
-
-    /**
-     * Adds a DataNode Port.
-     *
-     * @param port DataNode port
-     *
-     * @return DatanodeDetails.Builder
-     */
-    public Builder addPort(Port port) {
-      this.ports.add(port);
-      return this;
-    }
-
-    /**
-     * Builds and returns DatanodeDetails instance.
-     *
-     * @return DatanodeDetails
-     */
-    public DatanodeDetails build() {
-      Preconditions.checkNotNull(id);
-      return new DatanodeDetails(id, ipAddress, hostName, ports);
-    }
-
-  }
-
-  /**
-   * Constructs a new Port with name and value.
-   *
-   * @param name Name of the port
-   * @param value Port number
-   *
-   * @return {@code Port} instance
-   */
-  public static Port newPort(Port.Name name, Integer value) {
-    return new Port(name, value);
-  }
-
-  /**
-   * Container to hold DataNode Port details.
-   */
-  public static final class Port {
-
-    /**
-     * Ports that are supported in DataNode.
-     */
-    public enum Name {
-      STANDALONE, RATIS, REST
-    }
-
-    private Name name;
-    private Integer value;
-
-    /**
-     * Private constructor for constructing Port object. Use
-     * DatanodeDetails#newPort to create a new Port object.
-     *
-     * @param name
-     * @param value
-     */
-    private Port(Name name, Integer value) {
-      this.name = name;
-      this.value = value;
-    }
-
-    /**
-     * Returns the name of the port.
-     *
-     * @return Port name
-     */
-    public Name getName() {
-      return name;
-    }
-
-    /**
-     * Returns the port number.
-     *
-     * @return Port number
-     */
-    public Integer getValue() {
-      return value;
-    }
-
-    @Override
-    public int hashCode() {
-      return name.hashCode();
-    }
-
-    /**
-     * Ports are considered equal if they have the same name.
-     *
-     * @param anObject
-     *          The object to compare this {@code Port} against
-     * @return {@code true} if the given object represents a {@code Port}
-     *         and has the same name, {@code false} otherwise
-     */
-    @Override
-    public boolean equals(Object anObject) {
-      if (this == anObject) {
-        return true;
-      }
-      if (anObject instanceof Port) {
-        return name.equals(((Port) anObject).name);
-      }
-      return false;
-    }
-  }
-
-}
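
A builder sketch for the removed DatanodeDetails; the address, hostname and
port number are illustrative. Port equality is name-only, which is what lets
setPort replace an existing entry:

import java.util.UUID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;

public final class DatanodeDetailsDemo {
  public static void main(String[] args) {
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString()) // required; build() checks it
        .setIpAddress("10.0.0.5")
        .setHostName("dn1.example.com")
        .addPort(DatanodeDetails.newPort(Port.Name.RATIS, 9858))
        .build();
    // Ports compare equal by name alone, so this replaces the RATIS entry.
    dn.setPort(DatanodeDetails.newPort(Port.Name.RATIS, 9859));
    System.out.println(dn + " ratis=" +
        dn.getPort(Port.Name.RATIS).getValue());
  }
}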

+ 0 - 22
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains HDDS protocol related classes.
- */
-package org.apache.hadoop.hdds.protocol;

+ 0 - 287
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java

@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.ratis.shaded.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class contains constants for configuration keys used in SCM.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ScmConfigKeys {
-
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
-      "scm.container.client.idle.threshold";
-  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
-      "10s";
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
-      "scm.container.client.max.size";
-  public static final int SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT =
-      256;
-
-  public static final String SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS =
-      "scm.container.client.max.outstanding.requests";
-  public static final int SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT
-      = 100;
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = false;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = "GRPC";
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = "dfs.container.ratis.num.write.chunk.threads";
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = 60;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = "dfs.container.ratis.replication.level";
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
-      1 * 1024 * 1024 * 1024;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
-      "dfs.container.ratis.segment.preallocated.size";
-  public static final int
-      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.client.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      "dfs.ratis.client.request.max.retries";
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      "dfs.ratis.client.request.retry.interval";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      TimeDuration.valueOf(100, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.retry-cache.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(1, TimeUnit.SECONDS);
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      "dfs.ratis.server.failure.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
-  // TODO : this is copied from OzoneConsts, may need to move to a better place
-  public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
-  // 16 MB by default
-  public static final int OZONE_SCM_CHUNK_SIZE_DEFAULT = 16 * 1024 * 1024;
-  public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
-
-  public static final String OZONE_SCM_CLIENT_PORT_KEY =
-      "ozone.scm.client.port";
-  public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
-
-  public static final String OZONE_SCM_DATANODE_PORT_KEY =
-      "ozone.scm.datanode.port";
-  public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
-
-  // OZONE_OM_PORT_DEFAULT = 9862
-  public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
-      "ozone.scm.block.client.port";
-  public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
-
-  // Container service client
-  public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
-      "ozone.scm.client.address";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.client.bind.host";
-  public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  // Block service client
-  public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
-      "ozone.scm.block.client.address";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
-      "ozone.scm.block.client.bind.host";
-  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
-      "ozone.scm.datanode.address";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
-      "ozone.scm.datanode.bind.host";
-  public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-
-  public static final String OZONE_SCM_HTTP_ENABLED_KEY =
-      "ozone.scm.http.enabled";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
-      "ozone.scm.http-bind-host";
-  public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
-      "ozone.scm.https-bind-host";
-  public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
-      "ozone.scm.http-address";
-  public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
-      "ozone.scm.https-address";
-  public static final String OZONE_SCM_KEYTAB_FILE =
-      "ozone.scm.keytab.file";
-  public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
-  public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
-
-  public static final String HDDS_REST_HTTP_ADDRESS_KEY =
-      "hdds.rest.http-address";
-  public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
-  public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
-  public static final String HDDS_REST_CSRF_ENABLED_KEY =
-      "hdds.rest.rest-csrf.enabled";
-  public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
-  public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
-      "hdds.rest.netty.high.watermark";
-  public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
-  public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
-  public static final String HDDS_REST_NETTY_LOW_WATERMARK =
-      "hdds.rest.netty.low.watermark";
-
-  public static final String OZONE_SCM_HANDLER_COUNT_KEY =
-      "ozone.scm.handler.count.key";
-  public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-
-  public static final String OZONE_SCM_DEADNODE_INTERVAL =
-      "ozone.scm.dead.node.interval";
-  public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
-      "10m";
-
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
-      "ozone.scm.heartbeat.thread.interval";
-  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
-      "3s";
-
-  public static final String OZONE_SCM_STALENODE_INTERVAL =
-      "ozone.scm.stale.node.interval";
-  public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-      "90s";
-
-  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
-      "ozone.scm.heartbeat.rpc-timeout";
-  public static final long OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
-      1000;
-
-  /**
-   * Defines how frequently we log a missed heartbeat to a specific
-   * SCM. By default we write a warning message for every 10
-   * sequential heartbeats that we miss to a specific SCM. This avoids
-   * overrunning the log with a flood of missed-heartbeat statements.
-   */
-  public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
-      "ozone.scm.heartbeat.log.warn.interval.count";
-  public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
-      10;
-
-  // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT.
-  // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777
-  //
-  // If this key is not specified datanodes will not be able to find
-  // SCM. The SCM membership can be dynamic, so this key should contain
-  // all possible SCM names. Once the SCM leader is discovered datanodes will
-  // get the right list of SCMs to heartbeat to from the leader.
-  // While it is good for the datanodes to know the names of all SCM nodes,
-  // it is sufficient to actually know the name of one working SCM. That SCM
-  // will be able to return the information about other SCMs that are part of
-  // the SCM replicated Log.
-  //
-  // In case of a membership change, any one of the SCM machines will be
-  // able to send back a new list to the datanodes.
-  public static final String OZONE_SCM_NAMES = "ozone.scm.names";
-
-  public static final int OZONE_SCM_DEFAULT_PORT =
-      OZONE_SCM_DATANODE_PORT_DEFAULT;
-  // File name and path where the datanode ID is to be written.
-  // If this value is not set then container startup will fail.
-  public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id";
-
-  public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id";
-
-  public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
-      "ozone.scm.db.cache.size.mb";
-  public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_SCM_CONTAINER_SIZE =
-      "ozone.scm.container.size";
-  public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
-
-  public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
-      "ozone.scm.container.placement.impl";
-
-  public static final String OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE =
-      "ozone.scm.container.provision_batch_size";
-  public static final int OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT = 20;
-
-  public static final String
-      OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
-      "ozone.scm.keyvalue.container.deletion-choosing.policy";
-
-  public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.container.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  public static final String OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT =
-      "ozone.scm.pipeline.creation.lease.timeout";
-
-  public static final String
-      OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
-  public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
-      "ozone.scm.block.deletion.max.retry";
-  public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT =
-      "hdds.scm.watcher.timeout";
-
-  public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
-      "10m";
-
-  /**
-   * Never constructed.
-   */
-  private ScmConfigKeys() {
-
-  }
-}
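As a reading aid, a hedged sketch of how these key/default pairs are typically consumed through Hadoop's standard Configuration API (the surrounding method is illustrative, not part of this patch):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    static void readScmSettings(Configuration conf) {
      // Integer key paired with its compile-time default.
      int maxClients = conf.getInt(
          ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
          ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT);

      // Duration-style values such as "90s" parse via getTimeDuration;
      // the 90_000 ms fallback mirrors the "90s" default above.
      long staleNodeMs = conf.getTimeDuration(
          ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL,
          90_000, TimeUnit.MILLISECONDS);

      System.out.println(maxClients + " cached clients, node stale after "
          + staleNodeMs + " ms");
    }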

+ 0 - 81
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java

@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * ScmInfo wraps the result returned from SCM#getScmInfo which
- * contains clusterId and the SCM Id.
- */
-public final class ScmInfo {
-  private String clusterId;
-  private String scmId;
-
-  /**
-   * Builder for ScmInfo.
-   */
-  public static class Builder {
-    private String clusterId;
-    private String scmId;
-
-    /**
-     * Sets the cluster id.
-     * @param cid clusterId to be set
-     * @return Builder for ScmInfo
-     */
-    public Builder setClusterId(String cid) {
-      this.clusterId = cid;
-      return this;
-    }
-
-    /**
-     * Sets the scmId.
-     * @param id scmId
-     * @return Builder for ScmInfo
-     */
-    public Builder setScmId(String id) {
-      this.scmId = id;
-      return this;
-    }
-
-    public ScmInfo build() {
-      return new ScmInfo(clusterId, scmId);
-    }
-  }
-
-  private ScmInfo(String clusterId, String scmId) {
-    this.clusterId = clusterId;
-    this.scmId = scmId;
-  }
-
-  /**
-   * Gets the clusterId from the Version file.
-   * @return ClusterId
-   */
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * Gets the SCM Id from the Version file.
-   * @return SCM Id
-   */
-  public String getScmId() {
-    return scmId;
-  }
-}
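A minimal sketch of the builder above; the two IDs are placeholders:

    // ScmInfo is immutable once built, so the builder is the only way
    // to populate it.
    ScmInfo info = new ScmInfo.Builder()
        .setClusterId("CID-example")   // placeholder cluster id
        .setScmId("SCM-example")       // placeholder SCM id
        .build();
    assert "CID-example".equals(info.getClusterId());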

+ 0 - 135
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java

@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public abstract class XceiverClientSpi implements Closeable {
-
-  final private AtomicInteger referenceCount;
-  private boolean isEvicted;
-
-  XceiverClientSpi() {
-    this.referenceCount = new AtomicInteger(0);
-    this.isEvicted = false;
-  }
-
-  void incrementReference() {
-    this.referenceCount.incrementAndGet();
-  }
-
-  void decrementReference() {
-    this.referenceCount.decrementAndGet();
-    cleanup();
-  }
-
-  void setEvicted() {
-    isEvicted = true;
-    cleanup();
-  }
-
-  // close the xceiverClient only if,
-  // 1) there is no refcount on the client
-  // 2) it has been evicted from the cache.
-  private void cleanup() {
-    if (referenceCount.get() == 0 && isEvicted) {
-      close();
-    }
-  }
-
-  @VisibleForTesting
-  public int getRefcount() {
-    return referenceCount.get();
-  }
-
-  /**
-   * Connects to the leader in the pipeline.
-   */
-  public abstract void connect() throws Exception;
-
-  @Override
-  public abstract void close();
-
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
-  public abstract Pipeline getPipeline();
-
-  /**
-   * Sends a given command to the server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public ContainerCommandResponseProto sendCommand(
-      ContainerCommandRequestProto request) throws IOException {
-    try {
-      return sendCommandAsync(request).get();
-    } catch (ExecutionException | InterruptedException e) {
-      throw new IOException("Failed to command " + request, e);
-    }
-  }
-
-  /**
-   * Sends a given command to the server and gets a waitable future back.
-   *
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public abstract CompletableFuture<ContainerCommandResponseProto>
-      sendCommandAsync(ContainerCommandRequestProto request)
-      throws IOException, ExecutionException, InterruptedException;
-
-  /**
-   * Create a pipeline.
-   */
-  public abstract void createPipeline() throws IOException;
-
-  /**
-   * Destroy a pipeline.
-   * @throws IOException
-   */
-  public abstract void destroyPipeline() throws IOException;
-
-  /**
-   * Returns pipeline Type.
-   *
-   * @return - {Stand_Alone, Ratis or Chained}
-   */
-  public abstract HddsProtos.ReplicationType getPipelineType();
-}
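The reference counting above releases a client only when it is both unreferenced and evicted. A hedged sketch of the intended lifecycle, as seen from a same-package cache (the cache and the request variable are assumed, not shown in this patch):

    client.incrementReference();      // caller checks the client out
    try {
      client.sendCommand(request);    // blocking wrapper over sendCommandAsync
    } finally {
      client.decrementReference();    // triggers close() only if already evicted
    }
    // Later, when the cache drops the entry:
    client.setEvicted();              // triggers close() only if refcount == 0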

+ 0 - 174
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java

@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * The interface to call into the underlying container layer.
- *
- * Written as an interface to allow easy testing: implement a mock container
- * layer for standalone testing of the CBlock API without actually calling
- * into remote containers. The actual container layer can simply re-implement
- * this.
- *
- * NOTE: this is a temporarily needed class. When SCM containers are
- * full-fledged, this interface will likely be removed.
- */
-@InterfaceStability.Unstable
-public interface ScmClient extends Closeable {
-  /**
-   * Creates a Container on SCM and returns the container with its pipeline.
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  ContainerWithPipeline createContainer(String owner) throws IOException;
-
-  /**
-   * Gets a container by ID -- Throws if the container does not exist.
-   * @param containerId - Container ID
-   * @return ContainerInfo
-   * @throws IOException
-   */
-  ContainerInfo getContainer(long containerId) throws IOException;
-
-  /**
-   * Gets a container by ID -- Throws if the container does not exist.
-   * @param containerId - Container ID
-   * @return ContainerWithPipeline
-   * @throws IOException
-   */
-  ContainerWithPipeline getContainerWithPipeline(long containerId)
-      throws IOException;
-
-  /**
-   * Close a container.
-   *
-   * @param containerId - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
-   * @throws IOException
-   */
-  void closeContainer(long containerId, Pipeline pipeline) throws IOException;
-
-  /**
-   * Close a container.
-   *
-   * @param containerId - ID of the container.
-   * @throws IOException
-   */
-  void closeContainer(long containerId) throws IOException;
-
-  /**
-   * Deletes an existing container.
-   * @param containerId - ID of the container.
-   * @param pipeline - Pipeline that represents the container.
-   * @param force - true to forcibly delete the container.
-   * @throws IOException
-   */
-  void deleteContainer(long containerId, Pipeline pipeline, boolean force)
-      throws IOException;
-
-  /**
-   * Deletes an existing container.
-   * @param containerId - ID of the container.
-   * @param force - true to forcibly delete the container.
-   * @throws IOException
-   */
-  void deleteContainer(long containerId, boolean force) throws IOException;
-
-  /**
-   * Lists a range of containers and gets their info.
-   *
-   * @param startContainerID start containerID.
-   * @param count count must be > 0.
-   *
-   * @return a list of container info.
-   * @throws IOException
-   */
-  List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException;
-
-  /**
-   * Read meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
-   * @return ContainerData
-   * @throws IOException
-   */
-  ContainerData readContainer(long containerID, Pipeline pipeline)
-      throws IOException;
-
-  /**
-   * Read meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @return ContainerData
-   * @throws IOException
-   */
-  ContainerData readContainer(long containerID)
-      throws IOException;
-
-  /**
-   * Gets the container size -- Computed by SCM from Container Reports.
-   * @param containerID - ID of the container.
-   * @return number of bytes used by this container.
-   * @throws IOException
-   */
-  long getContainerSize(long containerID) throws IOException;
-
-  /**
-   * Creates a Container on SCM and returns the container with its pipeline.
-   * @param type - Replication Type.
-   * @param replicationFactor - Replication Factor
-   * @return ContainerWithPipeline
-   * @throws IOException - in case of error.
-   */
-  ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor,
-      String owner) throws IOException;
-
-  /**
-   * Returns a set of Nodes that meet the query criteria.
-   * @param nodeStatuses - Criteria that we want the node to have.
-   * @param queryScope - Query scope - Cluster or pool.
-   * @param poolName - if it is pool, a pool name is required.
-   * @return A set of nodes that meet the requested criteria.
-   * @throws IOException
-   */
-  List<HddsProtos.Node> queryNode(HddsProtos.NodeState nodeStatuses,
-      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
-
-  /**
-   * Creates a specified replication pipeline.
-   * @param type - Type
-   * @param factor - Replication factor
-   * @param nodePool - Set of machines.
-   * @throws IOException
-   */
-  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException;
-}
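A hedged usage sketch of this interface; the "ozone" owner string and whatever concrete implementation stands behind client are assumptions:

    static void containerRoundTrip(ScmClient client) throws IOException {
      // Create a container under an assumed owner principal.
      ContainerWithPipeline created = client.createContainer("ozone");
      long id = created.getContainerInfo().getContainerID();

      // List up to 10 containers starting from the one just created.
      List<ContainerInfo> batch = client.listContainer(id, 10);
      System.out.println("Listed " + batch.size() + " containers");

      client.closeContainer(id);
    }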

+ 0 - 24
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */

+ 0 - 107
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java

@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.builder.CompareToBuilder;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-/**
- * Container ID is a long value in the range 1..MAX_CONTAINER_ID.
- * <p>
- * We are creating a specific type for this to avoid mixing this with
- * normal integers in code.
- */
-public class ContainerID implements Comparable {
-
-  private final long id;
-
-  /**
-   * Constructs ContainerID.
-   *
-   * @param id long
-   */
-  public ContainerID(long id) {
-    Preconditions.checkState(id > 0,
-        "Container ID should be a positive long. "+ id);
-    this.id = id;
-  }
-
-  /**
-   * Factory method for creation of ContainerID.
-   * @param containerID  long
-   * @return ContainerID.
-   */
-  public static ContainerID valueof(long containerID) {
-    return new ContainerID(containerID);
-  }
-
-  /**
-   * Returns long representation of ID.
-   *
-   * @return long
-   */
-  public long getId() {
-    return id;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerID that = (ContainerID) o;
-
-    return new EqualsBuilder()
-        .append(getId(), that.getId())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(61, 71)
-        .append(getId())
-        .toHashCode();
-  }
-
-  @Override
-  public int compareTo(Object o) {
-    Preconditions.checkNotNull(o);
-    if(getClass() != o.getClass()) {
-      throw new ClassCastException("ContainerID class expected. found:" +
-          o.getClass().toString());
-    }
-
-    ContainerID that = (ContainerID) o;
-    return new CompareToBuilder()
-        .append(this.getId(), that.getId())
-        .build();
-  }
-
-  @Override
-  public String toString() {
-    return "id=" + id;
-  }
-}
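A short sketch of the value semantics above:

    ContainerID a = ContainerID.valueof(1L);
    ContainerID b = ContainerID.valueof(2L);
    assert a.compareTo(b) < 0;                 // ordered by the long id
    assert a.equals(ContainerID.valueof(1L));  // equality is by id as well
    // ContainerID.valueof(0L) would throw: ids must be strictly positive.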

+ 0 - 79
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java

@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-
-/**
- * Allocated block wraps the result returned from SCM#allocateBlock which
- * contains a Pipeline and the block ID.
- */
-public final class AllocatedBlock {
-  private Pipeline pipeline;
-  private BlockID blockID;
-  // Indicates whether the client should create container before writing block.
-  private boolean shouldCreateContainer;
-
-  /**
-   * Builder for AllocatedBlock.
-   */
-  public static class Builder {
-    private Pipeline pipeline;
-    private BlockID blockID;
-    private boolean shouldCreateContainer;
-
-    public Builder setPipeline(Pipeline p) {
-      this.pipeline = p;
-      return this;
-    }
-
-    public Builder setBlockID(BlockID blockId) {
-      this.blockID = blockId;
-      return this;
-    }
-
-    public Builder setShouldCreateContainer(boolean shouldCreate) {
-      this.shouldCreateContainer = shouldCreate;
-      return this;
-    }
-
-    public AllocatedBlock build() {
-      return new AllocatedBlock(pipeline, blockID, shouldCreateContainer);
-    }
-  }
-
-  private AllocatedBlock(Pipeline pipeline, BlockID blockID,
-      boolean shouldCreateContainer) {
-    this.pipeline = pipeline;
-    this.blockID = blockID;
-    this.shouldCreateContainer = shouldCreateContainer;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public boolean getCreateContainer() {
-    return shouldCreateContainer;
-  }
-}
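A hedged sketch of the builder; pipeline and blockId are assumed to come out of an SCM allocateBlock response:

    AllocatedBlock block = new AllocatedBlock.Builder()
        .setPipeline(pipeline)            // assumed Pipeline instance
        .setBlockID(blockId)              // assumed BlockID instance
        .setShouldCreateContainer(true)
        .build();
    if (block.getCreateContainer()) {
      // The client is expected to create the container before writing.
    }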

+ 0 - 36
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java

@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a block is yet to be committed on the datanode.
- */
-public class BlockNotCommittedException extends StorageContainerException {
-
-  /**
-   * Constructs an {@code IOException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   */
-  public BlockNotCommittedException(String message) {
-    super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED);
-  }
-}
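Because BlockNotCommittedException extends StorageContainerException (itself an IOException), callers can single it out ahead of generic I/O handling. A hedged sketch; client and request are assumed:

    try {
      client.sendCommand(request);   // any XceiverClientSpi implementation
    } catch (BlockNotCommittedException e) {
      // The datanode has not committed the block yet; callers typically
      // back off and retry instead of failing hard.
    }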

+ 0 - 482
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java

@@ -1,482 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import static java.lang.Math.max;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Arrays;
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.util.Time;
-
-/**
- * Class wraps ozone container info.
- */
-public class ContainerInfo implements Comparator<ContainerInfo>,
-    Comparable<ContainerInfo>, Externalizable {
-
-  private static final ObjectWriter WRITER;
-  private static final String SERIALIZATION_ERROR_MSG = "Java serialization not"
-      + " supported. Use protobuf instead.";
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
-    mapper
-        .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE);
-    WRITER = mapper.writer();
-  }
-
-  private HddsProtos.LifeCycleState state;
-  @JsonIgnore
-  private PipelineID pipelineID;
-  private ReplicationFactor replicationFactor;
-  private ReplicationType replicationType;
-  // Bytes allocated by SCM for clients.
-  private long allocatedBytes;
-  // Actual container usage, updated through heartbeat.
-  private long usedBytes;
-  private long numberOfKeys;
-  private long lastUsed;
-  // Wall-clock ms since the epoch at which the current state was entered.
-  private long stateEnterTime;
-  private String owner;
-  private long containerID;
-  private long deleteTransactionId;
-  /**
-   * Allows callers to attach private data to a ContainerInfo. This is not
-   * serialized via protobuf; it exists only on this in-memory object.
-   */
-  @JsonIgnore
-  private byte[] data;
-
-  ContainerInfo(
-      long containerID,
-      HddsProtos.LifeCycleState state,
-      PipelineID pipelineID,
-      long allocatedBytes,
-      long usedBytes,
-      long numberOfKeys,
-      long stateEnterTime,
-      String owner,
-      long deleteTransactionId,
-      ReplicationFactor replicationFactor,
-      ReplicationType repType) {
-    this.containerID = containerID;
-    this.pipelineID = pipelineID;
-    this.allocatedBytes = allocatedBytes;
-    this.usedBytes = usedBytes;
-    this.numberOfKeys = numberOfKeys;
-    this.lastUsed = Time.monotonicNow();
-    this.state = state;
-    this.stateEnterTime = stateEnterTime;
-    this.owner = owner;
-    this.deleteTransactionId = deleteTransactionId;
-    this.replicationFactor = replicationFactor;
-    this.replicationType = repType;
-  }
-
-  public ContainerInfo(ContainerInfo info) {
-    this(info.getContainerID(), info.getState(), info.getPipelineID(),
-        info.getAllocatedBytes(), info.getUsedBytes(), info.getNumberOfKeys(),
-        info.getStateEnterTime(), info.getOwner(),
-        info.getDeleteTransactionId(), info.getReplicationFactor(),
-        info.getReplicationType());
-  }
-  /**
-   * No-args constructor, needed for serialization (flagged by findbugs).
-   */
-  public ContainerInfo() {
-  }
-
-  public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
-    ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    return builder.setPipelineID(
-        PipelineID.getFromProtobuf(info.getPipelineID()))
-        .setAllocatedBytes(info.getAllocatedBytes())
-        .setUsedBytes(info.getUsedBytes())
-        .setNumberOfKeys(info.getNumberOfKeys())
-        .setState(info.getState())
-        .setStateEnterTime(info.getStateEnterTime())
-        .setOwner(info.getOwner())
-        .setContainerID(info.getContainerID())
-        .setDeleteTransactionId(info.getDeleteTransactionId())
-        .setReplicationFactor(info.getReplicationFactor())
-        .setReplicationType(info.getReplicationType())
-        .build();
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public HddsProtos.LifeCycleState getState() {
-    return state;
-  }
-
-  public void setState(HddsProtos.LifeCycleState state) {
-    this.state = state;
-  }
-
-  public long getStateEnterTime() {
-    return stateEnterTime;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  public PipelineID getPipelineID() {
-    return pipelineID;
-  }
-
-  public long getAllocatedBytes() {
-    return allocatedBytes;
-  }
-
-  /**
-   * Updates allocated bytes.
-   *
-   * @param size - newly allocated bytes; a negative size can be used in
-   * case of deletes.
-   */
-  public void updateAllocatedBytes(long size) {
-    this.allocatedBytes += size;
-  }
-
-  public long getUsedBytes() {
-    return usedBytes;
-  }
-
-  public long getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  public long getDeleteTransactionId() {
-    return deleteTransactionId;
-  }
-
-  public void updateDeleteTransactionId(long transactionId) {
-    deleteTransactionId = max(transactionId, deleteTransactionId);
-  }
-
-  public ContainerID containerID() {
-    return new ContainerID(getContainerID());
-  }
-
-  /**
-   * Gets the last used time from SCM's perspective.
-   *
-   * @return time in milliseconds.
-   */
-  public long getLastUsed() {
-    return lastUsed;
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public void updateLastUsedTime() {
-    lastUsed = Time.monotonicNow();
-  }
-
-  public void allocate(long size) {
-    // should we also have total container size in ContainerInfo
-    // and check before allocating?
-    allocatedBytes += size;
-  }
-
-  public HddsProtos.SCMContainerInfo getProtobuf() {
-    HddsProtos.SCMContainerInfo.Builder builder =
-        HddsProtos.SCMContainerInfo.newBuilder();
-    Preconditions.checkState(containerID > 0);
-    return builder.setAllocatedBytes(getAllocatedBytes())
-        .setContainerID(getContainerID())
-        .setUsedBytes(getUsedBytes())
-        .setNumberOfKeys(getNumberOfKeys()).setState(getState())
-        .setStateEnterTime(getStateEnterTime())
-        .setDeleteTransactionId(getDeleteTransactionId())
-        .setPipelineID(getPipelineID().getProtobuf())
-        .setReplicationFactor(getReplicationFactor())
-        .setReplicationType(getReplicationType())
-        .setOwner(getOwner())
-        .build();
-  }
-
-  public String getOwner() {
-    return owner;
-  }
-
-  public void setOwner(String owner) {
-    this.owner = owner;
-  }
-
-  @Override
-  public String toString() {
-    return "ContainerInfo{"
-        + "id=" + containerID
-        + ", state=" + state
-        + ", pipelineID=" + pipelineID
-        + ", stateEnterTime=" + stateEnterTime
-        + ", owner=" + owner
-        + '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerInfo that = (ContainerInfo) o;
-
-    return new EqualsBuilder()
-        .append(getContainerID(), that.getContainerID())
-
-        // TODO : Fix this later. If we add these factors some tests fail.
-        // So commenting this out to continue; it will be enforced once
-        // pipeline changes move Container Name from Pipeline to
-        // SCMContainerInfo.
-        // .append(pipeline.getFactor(), that.pipeline.getFactor())
-        // .append(pipeline.getType(), that.pipeline.getType())
-        .append(owner, that.owner)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(11, 811)
-        .append(getContainerID())
-        .append(getOwner())
-        .toHashCode();
-  }
-
-  /**
-   * Compares its two arguments for order.  Returns a negative integer, zero, or
-   * a positive integer as the first argument is less than, equal to, or greater
-   * than the second.<p>
-   *
-   * @param o1 the first object to be compared.
-   * @param o2 the second object to be compared.
-   * @return a negative integer, zero, or a positive integer as the first
-   * argument is less than, equal to, or greater than the second.
-   * @throws NullPointerException if an argument is null and this comparator
-   *                              does not permit null arguments
-   * @throws ClassCastException   if the arguments' types prevent them from
-   *                              being compared by this comparator.
-   */
-  @Override
-  public int compare(ContainerInfo o1, ContainerInfo o2) {
-    return Long.compare(o1.getLastUsed(), o2.getLastUsed());
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less than,
-   * equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(ContainerInfo o) {
-    return this.compare(this, o);
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns private data that is set on this containerInfo.
-   *
-   * @return blob, the user can interpret it any way they like.
-   */
-  public byte[] getData() {
-    if (this.data != null) {
-      return Arrays.copyOf(this.data, this.data.length);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Set private data on ContainerInfo object.
-   *
-   * @param data -- private data.
-   */
-  public void setData(byte[] data) {
-    if (data != null) {
-      this.data = Arrays.copyOf(data, data.length);
-    }
-  }
-
-  /**
-   * Throws IOException as default java serialization is not supported. Use
-   * serialization via protobuf instead.
-   *
-   * @param out the stream to write the object to
-   * @throws IOException Includes any I/O exceptions that may occur
-   * @serialData Overriding methods should use this tag to describe
-   * the data layout of this Externalizable object.
-   * List the sequence of element types and, if possible,
-   * relate the element to a public/protected field and/or
-   * method of this Externalizable class.
-   */
-  @Override
-  public void writeExternal(ObjectOutput out) throws IOException {
-    throw new IOException(SERIALIZATION_ERROR_MSG);
-  }
-
-  /**
-   * Throws IOException as default java serialization is not supported. Use
-   * serialization via protobuf instead.
-   *
-   * @param in the stream to read data from in order to restore the object
-   * @throws IOException            if I/O errors occur
-   * @throws ClassNotFoundException If the class for an object being
-   *                                restored cannot be found.
-   */
-  @Override
-  public void readExternal(ObjectInput in)
-      throws IOException, ClassNotFoundException {
-    throw new IOException(SERIALIZATION_ERROR_MSG);
-  }
-
-  /**
-   * Builder class for ContainerInfo.
-   */
-  public static class Builder {
-    private HddsProtos.LifeCycleState state;
-    private long allocated;
-    private long used;
-    private long keys;
-    private long stateEnterTime;
-    private String owner;
-    private long containerID;
-    private long deleteTransactionId;
-    private PipelineID pipelineID;
-    private ReplicationFactor replicationFactor;
-    private ReplicationType replicationType;
-
-    public Builder setReplicationType(
-        ReplicationType repType) {
-      this.replicationType = repType;
-      return this;
-    }
-
-    public Builder setPipelineID(PipelineID pipelineId) {
-      this.pipelineID = pipelineId;
-      return this;
-    }
-
-    public Builder setReplicationFactor(ReplicationFactor repFactor) {
-      this.replicationFactor = repFactor;
-      return this;
-    }
-
-    public Builder setContainerID(long id) {
-      Preconditions.checkState(id >= 0);
-      this.containerID = id;
-      return this;
-    }
-
-    public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
-      this.state = lifeCycleState;
-      return this;
-    }
-
-    public Builder setAllocatedBytes(long bytesAllocated) {
-      this.allocated = bytesAllocated;
-      return this;
-    }
-
-    public Builder setUsedBytes(long bytesUsed) {
-      this.used = bytesUsed;
-      return this;
-    }
-
-    public Builder setNumberOfKeys(long keyCount) {
-      this.keys = keyCount;
-      return this;
-    }
-
-    public Builder setStateEnterTime(long time) {
-      this.stateEnterTime = time;
-      return this;
-    }
-
-    public Builder setOwner(String containerOwner) {
-      this.owner = containerOwner;
-      return this;
-    }
-
-    public Builder setDeleteTransactionId(long deleteTransactionID) {
-      this.deleteTransactionId = deleteTransactionID;
-      return this;
-    }
-
-    public ContainerInfo build() {
-      return new ContainerInfo(containerID, state, pipelineID, allocated,
-              used, keys, stateEnterTime, owner, deleteTransactionId,
-          replicationFactor, replicationType);
-    }
-  }
-
-  /**
-   * Check if a container is in an open state; this will check if the
-   * container is either open, allocated, creating or closing.
-   * Any container in these states is managed as an open container by SCM.
-   */
-  public boolean isContainerOpen() {
-    return state == HddsProtos.LifeCycleState.ALLOCATED ||
-        state == HddsProtos.LifeCycleState.CREATING ||
-        state == HddsProtos.LifeCycleState.OPEN ||
-        state == HddsProtos.LifeCycleState.CLOSING;
-  }
-}
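To make the builder/protobuf pairing above concrete, a hedged round-trip sketch; pipelineId is an assumed PipelineID instance and "ozone" an assumed owner:

    // getProtobuf() expects pipeline, replication settings and owner
    // to be populated, so set them all before building.
    ContainerInfo info = new ContainerInfo.Builder()
        .setContainerID(42L)
        .setState(HddsProtos.LifeCycleState.OPEN)
        .setPipelineID(pipelineId)
        .setReplicationType(HddsProtos.ReplicationType.RATIS)
        .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
        .setOwner("ozone")
        .build();
    assert info.isContainerOpen();   // OPEN counts as an open state

    HddsProtos.SCMContainerInfo proto = info.getProtobuf();
    ContainerInfo back = ContainerInfo.fromProtobuf(proto);
    assert back.getContainerID() == info.getContainerID();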

+ 0 - 132
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java

@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * Class wraps ozone container info.
- */
-public class ContainerWithPipeline implements Comparator<ContainerWithPipeline>,
-    Comparable<ContainerWithPipeline> {
-
-  private final ContainerInfo containerInfo;
-  private final Pipeline pipeline;
-
-  public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) {
-    this.containerInfo = containerInfo;
-    this.pipeline = pipeline;
-  }
-
-  public ContainerInfo getContainerInfo() {
-    return containerInfo;
-  }
-
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  public static ContainerWithPipeline fromProtobuf(
-      HddsProtos.ContainerWithPipeline allocatedContainer) {
-    return new ContainerWithPipeline(
-        ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
-        Pipeline.getFromProtoBuf(allocatedContainer.getPipeline()));
-  }
-
-  public HddsProtos.ContainerWithPipeline getProtobuf() {
-    HddsProtos.ContainerWithPipeline.Builder builder =
-        HddsProtos.ContainerWithPipeline.newBuilder();
-    builder.setContainerInfo(getContainerInfo().getProtobuf())
-        .setPipeline(getPipeline().getProtobufMessage());
-
-    return builder.build();
-  }
-
-
-  @Override
-  public String toString() {
-    return containerInfo.toString() + " | " + pipeline.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    ContainerWithPipeline that = (ContainerWithPipeline) o;
-
-    return new EqualsBuilder()
-        .append(getContainerInfo(), that.getContainerInfo())
-        .append(getPipeline(), that.getPipeline())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(11, 811)
-        .append(getContainerInfo())
-        .append(getPipeline())
-        .toHashCode();
-  }
-
-  /**
-   * Compares its two arguments for order.  Returns a negative integer, zero, or
-   * a positive integer as the first argument is less than, equal to, or greater
-   * than the second.<p>
-   *
-   * @param o1 the first object to be compared.
-   * @param o2 the second object to be compared.
-   * @return a negative integer, zero, or a positive integer as the first
-   * argument is less than, equal to, or greater than the second.
-   * @throws NullPointerException if an argument is null and this comparator
-   *                              does not permit null arguments
-   * @throws ClassCastException   if the arguments' types prevent them from
-   *                              being compared by this comparator.
-   */
-  @Override
-  public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) {
-    return o1.getContainerInfo().compareTo(o2.getContainerInfo());
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less than,
-   * equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object is
-   * less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(ContainerWithPipeline o) {
-    return this.compare(this, o);
-  }
-
-}

+ 0 - 53
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java

@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
-
-/**
- * Class wraps storage container manager block deletion results.
- */
-public class DeleteBlockResult {
-  private BlockID blockID;
-  private DeleteScmBlockResult.Result result;
-
-  public DeleteBlockResult(final BlockID blockID,
-      final DeleteScmBlockResult.Result result) {
-    this.blockID = blockID;
-    this.result = result;
-  }
-
-  /**
-   * Get the deleted block id.
-   * @return block id.
-   */
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  /**
-   * Get block deletion result.
-   * @return block deletion result.
-   */
-  public DeleteScmBlockResult.Result getResult() {
-    return result;
-  }
-}
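
For context, a minimal sketch of how this removed helper pairs a block with its deletion status. The BlockID (containerID, localID) constructor and the proto Result value are assumptions about the surrounding tree, not shown in this diff:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;

    import static org.apache.hadoop.hdds.protocol.proto
        .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;

    public class DeleteBlockResultSketch {
      public static void main(String[] args) {
        // Assumption: BlockID exposes a (containerID, localID) constructor.
        BlockID blockID = new BlockID(1L, 100L);
        DeleteBlockResult result = new DeleteBlockResult(
            blockID, DeleteScmBlockResult.Result.success);
        System.out.println(result.getBlockID() + " -> " + result.getResult());
      }
    }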

+ 0 - 315
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java

@@ -1,315 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.List;
-
-/**
- * A pipeline represents the group of machines over which a container lives.
- */
-public class Pipeline {
-  static final String PIPELINE_INFO = "PIPELINE_INFO_FILTER";
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"leaderID", "datanodes"};
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    WRITER = mapper.writer(filters);
-  }
-
-  @JsonIgnore
-  private String leaderID;
-  @JsonIgnore
-  private Map<String, DatanodeDetails> datanodes;
-  private HddsProtos.LifeCycleState lifeCycleState;
-  private HddsProtos.ReplicationType type;
-  private HddsProtos.ReplicationFactor factor;
-  private PipelineID id;
-
-  /**
-   * Constructs a new pipeline data structure.
-   *
-   * @param leaderID       -  Leader datanode id
-   * @param lifeCycleState  - Pipeline State
-   * @param replicationType - Replication protocol
-   * @param replicationFactor - replication count on datanodes
-   * @param id  - pipeline ID
-   */
-  public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor, PipelineID id) {
-    this.leaderID = leaderID;
-    this.lifeCycleState = lifeCycleState;
-    this.type = replicationType;
-    this.factor = replicationFactor;
-    this.id = id;
-    datanodes = new ConcurrentHashMap<>();
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    Pipeline that = (Pipeline) o;
-
-    return id.equals(that.id)
-            && factor.equals(that.factor)
-            && type.equals(that.type)
-            && lifeCycleState.equals(that.lifeCycleState)
-            && leaderID.equals(that.leaderID);
-
-  }
-
-  /**
-   * Gets pipeline object from protobuf.
-   *
-   * @param pipelineProto - ProtoBuf definition for the pipeline.
-   * @return Pipeline Object
-   */
-  public static Pipeline getFromProtoBuf(
-      HddsProtos.Pipeline pipelineProto) {
-    Preconditions.checkNotNull(pipelineProto);
-    Pipeline pipeline =
-        new Pipeline(pipelineProto.getLeaderID(),
-            pipelineProto.getState(),
-            pipelineProto.getType(),
-            pipelineProto.getFactor(),
-            PipelineID.getFromProtobuf(pipelineProto.getId()));
-
-    for (HddsProtos.DatanodeDetailsProto dataID :
-        pipelineProto.getMembersList()) {
-      pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID));
-    }
-    return pipeline;
-  }
-
-  /**
-   * returns the replication count.
-   * @return Replication Factor
-   */
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  /**
-   * Returns the leader datanode of the pipeline.
-   *
-   * @return leader datanode.
-   */
-  @JsonIgnore
-  public DatanodeDetails getLeader() {
-    return getDatanodes().get(leaderID);
-  }
-
-  /**
-   * Adds a datanode to the pipeline.
-   * @param datanodeDetails datanode to be added.
-   * @return true if the datanode was not already present, false otherwise
-   */
-  public boolean addMember(DatanodeDetails datanodeDetails) {
-    return datanodes.put(datanodeDetails.getUuid().toString(),
-        datanodeDetails) == null;
-
-  }
-
-  public void resetPipeline() {
-    // reset datanodes in pipeline and learn about them through
-    // pipeline reports on SCM restart
-    datanodes.clear();
-  }
-
-  public Map<String, DatanodeDetails> getDatanodes() {
-    return datanodes;
-  }
-
-  public boolean isEmpty() {
-    return datanodes.isEmpty();
-  }
-  /**
-   * Returns the hostname of the leader datanode.
-   *
-   * @return hostname of the leader.
-   */
-  public String getLeaderHost() {
-    return getDatanodes()
-        .get(leaderID).getHostName();
-  }
-
-  /**
-   * Returns the ID of the leader datanode.
-   *
-   * @return leader ID.
-   */
-  public String getLeaderID() {
-    return leaderID;
-  }
-  /**
-   * Returns all machines that make up this pipeline.
-   *
-   * @return List of Machines.
-   */
-  @JsonIgnore
-  public List<DatanodeDetails> getMachines() {
-    return new ArrayList<>(getDatanodes().values());
-  }
-
-  /**
-   * Returns the hostnames of all machines that make up this pipeline.
-   *
-   * @return List of hostnames.
-   */
-  public List<String> getDatanodeHosts() {
-    List<String> dataHosts = new ArrayList<>();
-    for (DatanodeDetails datanode : getDatanodes().values()) {
-      dataHosts.add(datanode.getHostName());
-    }
-    return dataHosts;
-  }
-
-  /**
-   * Return a Protobuf Pipeline message from pipeline.
-   *
-   * @return Protobuf message
-   */
-  @JsonIgnore
-  public HddsProtos.Pipeline getProtobufMessage() {
-    HddsProtos.Pipeline.Builder builder =
-        HddsProtos.Pipeline.newBuilder();
-    for (DatanodeDetails datanode : datanodes.values()) {
-      builder.addMembers(datanode.getProtoBufMessage());
-    }
-    builder.setLeaderID(leaderID);
-
-    if (lifeCycleState != null) {
-      builder.setState(lifeCycleState);
-    }
-    if (type != null) {
-      builder.setType(type);
-    }
-
-    if (factor != null) {
-      builder.setFactor(factor);
-    }
-
-    if (id != null) {
-      builder.setId(id.getProtobuf());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Gets the State of the pipeline.
-   *
-   * @return - LifeCycleStates.
-   */
-  public HddsProtos.LifeCycleState getLifeCycleState() {
-    return lifeCycleState;
-  }
-
-  /**
-   * Update the State of the pipeline.
-   */
-  public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
-    lifeCycleState = nextState;
-  }
-
-  /**
-   * Gets the pipeline id.
-   *
-   * @return - Id of the pipeline
-   */
-  public PipelineID getId() {
-    return id;
-  }
-
-  /**
-   * Returns the type.
-   *
-   * @return type - Standalone, Ratis, Chained.
-   */
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
-        .append("[");
-    getDatanodes().keySet().forEach(
-        node -> b.append(node.endsWith(getLeaderID()) ? "*" + node : node));
-    b.append(" id:").append(id);
-    if (getType() != null) {
-      b.append(" type:").append(getType().toString());
-    }
-    if (getFactor() != null) {
-      b.append(" factor:").append(getFactor().toString());
-    }
-    if (getLifeCycleState() != null) {
-      b.append(" State:").append(getLifeCycleState().toString());
-    }
-    return b.toString();
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  @JsonFilter(PIPELINE_INFO)
-  class MixIn {
-  }
-}
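
A short sketch of the removed Pipeline in use, constructing one and round-tripping it through protobuf; the enum values chosen here are illustrative:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
    import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;

    public class PipelineSketch {
      public static void main(String[] args) throws Exception {
        // The leader ID is a datanode UUID string; members are left empty
        // for brevity and would normally be added via addMember().
        Pipeline pipeline = new Pipeline("0cba5c91-2b78-4c66-95ee-b0bfb7f5c527",
            HddsProtos.LifeCycleState.OPEN,
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            PipelineID.randomId());

        // Serialize to protobuf and back; after an SCM restart the member
        // list is re-learned from pipeline reports (see resetPipeline above).
        HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
        Pipeline roundTripped = Pipeline.getFromProtoBuf(proto);
        System.out.println(roundTripped.toJsonString());
      }
    }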

+ 0 - 97
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java

@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.ratis.protocol.RaftGroupId;
-
-import java.util.UUID;
-
-/**
- * ID for the pipeline. The ID is based on a UUID so that it can be used in
- * Ratis as a RaftGroupId; the group ID is used by the datanodes to
- * initialize the Ratis group they are part of.
- */
-public final class PipelineID implements Comparable<PipelineID> {
-
-  private UUID id;
-  private RaftGroupId groupId;
-
-  private PipelineID(UUID id) {
-    this.id = id;
-    this.groupId = RaftGroupId.valueOf(id);
-  }
-
-  public static PipelineID randomId() {
-    return new PipelineID(UUID.randomUUID());
-  }
-
-  public static PipelineID valueOf(UUID id) {
-    return new PipelineID(id);
-  }
-
-  public static PipelineID valueOf(RaftGroupId groupId) {
-    return valueOf(groupId.getUuid());
-  }
-
-  public RaftGroupId getRaftGroupID() {
-    return groupId;
-  }
-
-  public UUID getId() {
-    return id;
-  }
-
-  public HddsProtos.PipelineID getProtobuf() {
-    return HddsProtos.PipelineID.newBuilder().setId(id.toString()).build();
-  }
-
-  public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) {
-    return new PipelineID(UUID.fromString(protos.getId()));
-  }
-
-  @Override
-  public String toString() {
-    return "pipelineId=" + id;
-  }
-
-  @Override
-  public int compareTo(PipelineID o) {
-    return this.id.compareTo(o.id);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    PipelineID that = (PipelineID) o;
-
-    return id.equals(that.id);
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-}
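
PipelineID is a thin UUID wrapper whose point is the Ratis mapping described in the javadoc above; all of the conversions below are taken directly from the deleted source:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
    import org.apache.ratis.protocol.RaftGroupId;

    public class PipelineIdSketch {
      public static void main(String[] args) {
        PipelineID id = PipelineID.randomId();

        // The same UUID backs the Ratis group id, so the two identifiers
        // convert freely in both directions.
        RaftGroupId group = id.getRaftGroupID();
        PipelineID fromGroup = PipelineID.valueOf(group);

        // Protobuf round trip, as used on the wire.
        HddsProtos.PipelineID proto = id.getProtobuf();
        PipelineID fromProto = PipelineID.getFromProtobuf(proto);

        System.out.println(id.equals(fromGroup) && id.equals(fromProto)); // true
      }
    }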

+ 0 - 104
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java

@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import java.io.IOException;
-
-/**
- * Exceptions thrown from the Storage Container.
- */
-public class StorageContainerException extends IOException {
-  private ContainerProtos.Result result;
-
-  /**
-   * Constructs a {@code StorageContainerException} with {@code null}
-   * as its error detail message.
-   *
-   * @param result - The result code
-   */
-  public StorageContainerException(ContainerProtos.Result result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   * @param result - The result code
-   */
-  public StorageContainerException(String message,
-      ContainerProtos.Result result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the {@link #getMessage()} method)
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   *
-   * @param result - The result code
-   * @since 1.6
-   */
-  public StorageContainerException(String message, Throwable cause,
-      ContainerProtos.Result result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs a {@code StorageContainerException} with the specified cause and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @param result - The result code
-   * @since 1.6
-   */
-  public StorageContainerException(Throwable cause, ContainerProtos.Result
-      result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns Result.
-   *
-   * @return Result.
-   */
-  public ContainerProtos.Result getResult() {
-    return result;
-  }
-
-
-}
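
Callers typically branch on the attached result code rather than parse the message; a hedged sketch, where the specific Result constant is an assumption about ContainerProtos rather than something visible in this diff:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

    public class StorageContainerExceptionSketch {
      static void readContainer() throws StorageContainerException {
        // Assumption: CONTAINER_NOT_FOUND is a ContainerProtos.Result value.
        throw new StorageContainerException("container 7 does not exist",
            ContainerProtos.Result.CONTAINER_NOT_FOUND);
      }

      public static void main(String[] args) {
        try {
          readContainer();
        } catch (StorageContainerException e) {
          // The machine-readable code travels with the exception, so callers
          // can decide to retry or fail fast without parsing the message.
          System.out.println(e.getResult() + ": " + e.getMessage());
        }
      }
    }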

+ 0 - 22
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-/**
- Contains protocol buffer helper classes and utilities used in the
- implementation.
- **/

+ 0 - 18
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java

@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;

+ 0 - 24
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */

+ 0 - 127
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java

@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.Set;
-
-/**
- * Holds the nodes that currently host the container for an object key hash.
- */
-@InterfaceAudience.Private
-public final class LocatedContainer {
-  private final String key;
-  private final String matchedKeyPrefix;
-  private final String containerName;
-  private final Set<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a LocatedContainer.
-   *
-   * @param key object key
-   * @param matchedKeyPrefix prefix of key that was used to find the location
-   * @param containerName container name
-   * @param locations nodes that currently host the container
-   * @param leader node that currently acts as pipeline leader
-   */
-  public LocatedContainer(String key, String matchedKeyPrefix,
-      String containerName, Set<DatanodeInfo> locations, DatanodeInfo leader) {
-    this.key = key;
-    this.matchedKeyPrefix = matchedKeyPrefix;
-    this.containerName = containerName;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the container name.
-   *
-   * @return container name
-   */
-  public String getContainerName() {
-    return this.containerName;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the container.
-   *
-   * @return Set<DatanodeInfo> nodes that currently host the container
-   */
-  public Set<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  /**
-   * Returns the prefix of the key that was used to find the location.
-   *
-   * @return prefix of the key that was used to find the location
-   */
-  public String getMatchedKeyPrefix() {
-    return this.matchedKeyPrefix;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof LocatedContainer)) {
-      return false;
-    }
-    LocatedContainer other = (LocatedContainer)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName()
-        + "{key=" + key
-        + "; matchedKeyPrefix=" + matchedKeyPrefix
-        + "; containerName=" + containerName
-        + "; locations=" + locations
-        + "; leader=" + leader
-        + "}";
-  }
-}

+ 0 - 60
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java

@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
- * to read/write a block.
- */
-public interface ScmBlockLocationProtocol {
-
-  /**
-   * Asks SCM where a block should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this block.
-   * @param size - size of the block.
-   * @return allocated block accessing info (key, pipeline).
-   * @throws IOException
-   */
-  AllocatedBlock allocateBlock(long size, ReplicationType type,
-      ReplicationFactor factor, String owner) throws IOException;
-
-  /**
-   * Delete blocks for a set of object keys.
-   *
-   * @param keyBlocksInfoList Map of object key and its blocks.
-   * @return list of block deletion results.
-   * @throws IOException if there is any failure.
-   */
-  List<DeleteBlockGroupResult>
-      deleteKeyBlocks(List<BlockGroup> keyBlocksInfoList) throws IOException;
-
-  /**
-   * Gets the cluster ID and SCM ID from SCM.
-   */
-  ScmInfo getScmInfo() throws IOException;
-}
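
A sketch of a typical call sequence against this removed interface; the scm parameter stands in for whatever implementation is wired up, and the block size and owner are illustrative:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
    import org.apache.hadoop.ozone.common.BlockGroup;
    import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;

    public class ScmBlockClientSketch {
      static void demo(ScmBlockLocationProtocol scm, BlockGroup keyBlocks)
          throws IOException {
        // Ask SCM to place a 256 MB block with Ratis replication, factor 3.
        AllocatedBlock block = scm.allocateBlock(256L * 1024 * 1024,
            ReplicationType.RATIS, ReplicationFactor.THREE, "ozone");
        System.out.println("allocated: " + block);

        // Delete all blocks belonging to one object key in a single batch.
        List<DeleteBlockGroupResult> results =
            scm.deleteKeyBlocks(Collections.singletonList(keyBlocks));
        System.out.println(scm.getScmInfo() + ", deleted groups: "
            + results.size());
      }
    }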

+ 0 - 100
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java

@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Holds the nodes that currently host the block for a block key.
- */
-@InterfaceAudience.Private
-public final class ScmLocatedBlock {
-  private final String key;
-  private final List<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a ScmLocatedBlock.
-   *
-   * @param key object key
-   * @param locations nodes that currently host the block
-   * @param leader node that currently acts as pipeline leader
-   */
-  public ScmLocatedBlock(final String key, final List<DatanodeInfo> locations,
-      final DatanodeInfo leader) {
-    this.key = key;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the block.
-   *
-   * @return List<DatanodeInfo> nodes that currently host the block
-   */
-  public List<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof ScmLocatedBlock)) {
-      return false;
-    }
-    ScmLocatedBlock other = (ScmLocatedBlock)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "{key=" + key + "; locations="
-        + locations.stream().map(loc -> loc.toString()).collect(Collectors
-            .joining(",")) + "; leader=" + leader + "}";
-  }
-}

+ 0 - 152
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java

@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
- * that currently host a container.
- */
-public interface StorageContainerLocationProtocol {
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this container.
-   *
-   */
-  ContainerWithPipeline allocateContainer(
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor factor, String owner)
-      throws IOException;
-
-  /**
-   * Ask SCM the location of the container. SCM responds with a group of
-   * nodes where this container and its replicas are located.
-   *
-   * @param containerID - ID of the container.
-   * @return ContainerInfo - the container info such as where the pipeline
-   *                         is located.
-   * @throws IOException
-   */
-  ContainerInfo getContainer(long containerID) throws IOException;
-
-  /**
-   * Ask SCM the location of the container. SCM responds with a group of
-   * nodes where this container and its replicas are located.
-   *
-   * @param containerID - ID of the container.
-   * @return ContainerWithPipeline - the container info with the pipeline.
-   * @throws IOException
-   */
-  ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException;
-
-  /**
-   * Asks SCM for a list of containers, starting from a given container ID
-   * and limited to at most {@code count} results. The search begins after
-   * the start container ID (exclusive), and the size of the result cannot
-   * exceed {@code count}.
-   *
-   * @param startContainerID start container ID.
-   * @param count maximum number of containers to return; if count < 0 the
-   *              size is unlimited (usually the count is replaced with a
-   *              very big value instead of being unlimited in case the db
-   *              is very big).
-   *
-   * @return a list of containers.
-   * @throws IOException
-   */
-  List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException;
-
-  /**
-   * Deletes a container in SCM.
-   *
-   * @param containerID
-   * @throws IOException
-   *   if failed to delete the container mapping from db store
-   *   or container doesn't exist.
-   */
-  void deleteContainer(long containerID) throws IOException;
-
-  /**
-   *  Queries a list of Node Statuses.
-   * @param state
-   * @return List of Datanodes.
-   */
-  List<HddsProtos.Node> queryNode(HddsProtos.NodeState state,
-      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
-
-  /**
-   * Notification from the client when it begins or finishes creating
-   * objects such as pipelines or containers on datanodes.
-   * The container will be in the Operational state after that.
-   * @param type object type
-   * @param id object id
-   * @param op operation type (e.g., create, close, delete)
-   * @param stage creation stage
-   */
-  void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException;
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   * @param type - replication type
-   * @param factor - factor 1 or 3
-   * @param nodePool - optional machine list to build a pipeline.
-   * @throws IOException
-   */
-  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
-      throws IOException;
-
-  /**
-   * Returns information about SCM.
-   *
-   * @return {@link ScmInfo}
-   * @throws IOException
-   */
-  ScmInfo getScmInfo() throws IOException;
-
-  /**
-   * Check if SCM is in chill mode.
-   *
-   * @return Returns true if SCM is in chill mode else returns false.
-   * @throws IOException
-   */
-  boolean inChillMode() throws IOException;
-
-  /**
-   * Force SCM out of Chill mode.
-   *
-   * @return returns true if operation is successful.
-   * @throws IOException
-   */
-  boolean forceExitChillMode() throws IOException;
-}
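
A hedged sketch of driving this removed admin-facing interface end to end; the scm parameter stands in for a concrete client such as the client-side translator later in this diff:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public class ScmContainerClientSketch {
      static void demo(StorageContainerLocationProtocol scm) throws IOException {
        // Optionally leave chill mode before mutating cluster state.
        if (scm.inChillMode()) {
          scm.forceExitChillMode();
        }

        ContainerWithPipeline created = scm.allocateContainer(
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE, "admin");
        System.out.println("created: " + created.getContainerInfo());

        // Page through containers, at most 100 at a time, starting after ID 0.
        List<ContainerInfo> page = scm.listContainer(0L, 100);
        System.out.println("listed: " + page.size());
      }
    }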

+ 0 - 19
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java

@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;

+ 0 - 173
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java

@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .AllocateScmBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .AllocateScmBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmKeyBlocksRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmKeyBlocksResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .KeyBlocks;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link ScmBlockLocationProtocol} interface to the RPC server
- * implementing {@link ScmBlockLocationProtocolPB}.
- */
-@InterfaceAudience.Private
-public final class ScmBlockLocationProtocolClientSideTranslatorPB
-    implements ScmBlockLocationProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final ScmBlockLocationProtocolPB rpcProxy;
-
-  /**
-   * Creates a new ScmBlockLocationProtocolClientSideTranslatorPB.
-   *
-   * @param rpcProxy {@link ScmBlockLocationProtocolPB} RPC proxy
-   */
-  public ScmBlockLocationProtocolClientSideTranslatorPB(
-      ScmBlockLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Asks SCM where a block should be allocated. SCM responds with the
-   * set of datanodes that should be used for creating this block.
-   * @param size - size of the block.
-   * @return allocated block accessing info (key, pipeline).
-   * @throws IOException
-   */
-  @Override
-  public AllocatedBlock allocateBlock(long size,
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-    Preconditions.checkArgument(size > 0, "block size must be greater than 0");
-
-    AllocateScmBlockRequestProto request =
-        AllocateScmBlockRequestProto.newBuilder().setSize(size).setType(type)
-            .setFactor(factor).setOwner(owner).build();
-    final AllocateScmBlockResponseProto response;
-    try {
-      response = rpcProxy.allocateScmBlock(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (response.getErrorCode() !=
-        AllocateScmBlockResponseProto.Error.success) {
-      throw new IOException(response.hasErrorMessage() ?
-          response.getErrorMessage() : "Allocate block failed.");
-    }
-    AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
-        .setBlockID(BlockID.getFromProtobuf(response.getBlockID()))
-        .setPipeline(Pipeline.getFromProtoBuf(response.getPipeline()))
-        .setShouldCreateContainer(response.getCreateContainer());
-    return builder.build();
-  }
-
-  /**
-   * Delete the set of keys specified.
-   *
-   * @param keyBlocksInfoList batch of block keys to delete.
-   * @return list of block deletion results.
-   * @throws IOException if there is any failure.
-   *
-   */
-  @Override
-  public List<DeleteBlockGroupResult> deleteKeyBlocks(
-      List<BlockGroup> keyBlocksInfoList) throws IOException {
-    List<KeyBlocks> keyBlocksProto = keyBlocksInfoList.stream()
-        .map(BlockGroup::getProto).collect(Collectors.toList());
-    DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto
-        .newBuilder().addAllKeyBlocks(keyBlocksProto).build();
-
-    final DeleteScmKeyBlocksResponseProto resp;
-    try {
-      resp = rpcProxy.deleteScmKeyBlocks(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    List<DeleteBlockGroupResult> results =
-        new ArrayList<>(resp.getResultsCount());
-    results.addAll(resp.getResultsList().stream().map(
-        result -> new DeleteBlockGroupResult(result.getObjectKey(),
-            DeleteBlockGroupResult
-                .convertBlockResultProto(result.getBlockResultsList())))
-        .collect(Collectors.toList()));
-    return results;
-  }
-
-  /**
-   * Gets the cluster ID and SCM ID from SCM.
-   * @return ScmInfo
-   * @throws IOException
-   */
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    HddsProtos.GetScmInfoRequestProto request =
-        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
-    HddsProtos.GetScmInfoRespsonseProto resp;
-    try {
-      resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    ScmInfo.Builder builder = new ScmInfo.Builder()
-        .setClusterId(resp.getClusterId())
-        .setScmId(resp.getScmId());
-    return builder.build();
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-}
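
Wiring this translator to a live SCM uses standard Hadoop RPC plumbing; a sketch under the assumption that the usual ProtobufRpcEngine setup applies, with a placeholder host and port:

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;

    public class ScmBlockRpcWiringSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
            ProtobufRpcEngine.class);

        // Placeholder address; real clients read this from configuration.
        InetSocketAddress scmAddress =
            new InetSocketAddress("scm.example.com", 9863);
        ScmBlockLocationProtocolPB proxy = RPC.getProxy(
            ScmBlockLocationProtocolPB.class,
            RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class),
            scmAddress, conf);

        // The translator is Closeable and stops the proxy on close(), so
        // try-with-resources cleans up the connection.
        try (ScmBlockLocationProtocolClientSideTranslatorPB client =
                 new ScmBlockLocationProtocolClientSideTranslatorPB(proxy)) {
          System.out.println(client.getScmInfo());
        }
      }
    }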

+ 0 - 35
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java

@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .ScmBlockLocationProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface ScmBlockLocationProtocolPB
-    extends ScmBlockLocationProtocolService.BlockingInterface {
-}

+ 0 - 371
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java

@@ -1,371 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitChillModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InChillModeResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link StorageContainerLocationProtocol} interface to the RPC server
- * implementing {@link StorageContainerLocationProtocolPB}.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolClientSideTranslatorPB
-    implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final StorageContainerLocationProtocolPB rpcProxy;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
-   *
-   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
-   */
-  public StorageContainerLocationProtocolClientSideTranslatorPB(
-      StorageContainerLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the set
-   * of datanodes that should be used for creating this container. Ozone/SCM
-   * only supports a replication factor of either 1 or 3.
-   * @param type - Replication Type
-   * @param factor - Replication Count
-   * @return allocated container, together with its pipeline
-   * @throws IOException
-   */
-  @Override
-  public ContainerWithPipeline allocateContainer(
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-
-    ContainerRequestProto request = ContainerRequestProto.newBuilder()
-        .setReplicationFactor(factor)
-        .setReplicationType(type)
-        .setOwner(owner)
-        .build();
-
-    final ContainerResponseProto response;
-    try {
-      response = rpcProxy.allocateContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (response.getErrorCode() != ContainerResponseProto.Error.success) {
-      throw new IOException(response.hasErrorMessage() ?
-          response.getErrorMessage() : "Allocate container failed.");
-    }
-    return ContainerWithPipeline.fromProtobuf(
-        response.getContainerWithPipeline());
-  }
-
-  public ContainerInfo getContainer(long containerID) throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerRequestProto request = GetContainerRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .build();
-    try {
-      GetContainerResponseProto response =
-          rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
-      return ContainerInfo.fromProtobuf(response.getContainerInfo());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerWithPipelineRequestProto request =
-        GetContainerWithPipelineRequestProto.newBuilder()
-            .setContainerID(containerID).build();
-    try {
-      GetContainerWithPipelineResponseProto response =
-          rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
-      return ContainerWithPipeline.fromProtobuf(
-          response.getContainerWithPipeline());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException {
-    Preconditions.checkState(startContainerID >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(count > 0,
-        "Container count must be greater than 0.");
-    SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto
-        .newBuilder();
-    builder.setStartContainerID(startContainerID);
-    builder.setCount(count);
-    SCMListContainerRequestProto request = builder.build();
-
-    try {
-      SCMListContainerResponseProto response =
-          rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
-      List<ContainerInfo> containerList = new ArrayList<>();
-      for (HddsProtos.SCMContainerInfo containerInfoProto : response
-          .getContainersList()) {
-        containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
-      }
-      return containerList;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Asks SCM to delete a container by ID. SCM will remove
-   * the container mapping from its database.
-   *
-   * @param containerID
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .build();
-    try {
-      rpcProxy.deleteContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Queries a list of Node Statuses.
-   *
-   * @param nodeStatuses
-   * @return List of Datanodes.
-   */
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
-      throws IOException {
-    // TODO : Only cluster-wide queries are supported right now, so
-    // queryScope and poolName are ignored.
-    Preconditions.checkNotNull(nodeStatuses);
-    NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
-        .setState(nodeStatuses)
-        .setScope(queryScope).setPoolName(poolName).build();
-    try {
-      NodeQueryResponseProto response =
-          rpcProxy.queryNode(NULL_RPC_CONTROLLER, request);
-      return response.getDatanodesList();
-    } catch (ServiceException e) {
-      throw  ProtobufHelper.getRemoteException(e);
-    }
-
-  }
-
-  /**
-   * Notification from the client that creates an object on datanodes.
-   * @param type object type
-   * @param id object id
-   * @param op operation type (e.g., create, close, delete)
-   * @param stage object creation stage : begin/complete
-   */
-  @Override
-  public void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException {
-    Preconditions.checkState(id >= 0,
-        "Object id cannot be negative.");
-    ObjectStageChangeRequestProto request =
-        ObjectStageChangeRequestProto.newBuilder()
-            .setType(type)
-            .setId(id)
-            .setOp(op)
-            .setStage(stage)
-            .build();
-    try {
-      rpcProxy.notifyObjectStageChange(NULL_RPC_CONTROLLER, request);
-    } catch(ServiceException e){
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   *
-   * @param replicationType - replication type
-   * @param factor - factor 1 or 3
-   * @param nodePool - optional machine list to build a pipeline.
-   * @throws IOException
-   */
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
-      .NodePool nodePool) throws IOException {
-    PipelineRequestProto request = PipelineRequestProto.newBuilder()
-        .setNodePool(nodePool)
-        .setReplicationFactor(factor)
-        .setReplicationType(replicationType)
-        .build();
-    try {
-      PipelineResponseProto response =
-          rpcProxy.allocatePipeline(NULL_RPC_CONTROLLER, request);
-      if (response.getErrorCode() ==
-          PipelineResponseProto.Error.success) {
-        Preconditions.checkState(response.hasPipeline(), "With success, " +
-            "must come a pipeline");
-        return Pipeline.getFromProtoBuf(response.getPipeline());
-      } else {
-        String errorMessage = String.format("create replication pipeline " +
-                "failed. code : %s Message: %s", response.getErrorCode(),
-            response.hasErrorMessage() ? response.getErrorMessage() : "");
-        throw new IOException(errorMessage);
-      }
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    HddsProtos.GetScmInfoRequestProto request =
-        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
-    try {
-      HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
-          NULL_RPC_CONTROLLER, request);
-      ScmInfo.Builder builder = new ScmInfo.Builder()
-          .setClusterId(resp.getClusterId())
-          .setScmId(resp.getScmId());
-      return builder.build();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-  }
-
-  /**
-   * Check if SCM is in chill mode.
-   *
-   * @return Returns true if SCM is in chill mode else returns false.
-   * @throws IOException
-   */
-  @Override
-  public boolean inChillMode() throws IOException {
-    InChillModeRequestProto request =
-        InChillModeRequestProto.getDefaultInstance();
-    try {
-      InChillModeResponseProto resp = rpcProxy.inChillMode(
-          NULL_RPC_CONTROLLER, request);
-      return resp.getInChillMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Forces SCM out of chill mode.
-   *
-   * @return true if the operation succeeded.
-   * @throws IOException
-   */
-  @Override
-  public boolean forceExitChillMode() throws IOException {
-    ForceExitChillModeRequestProto request =
-        ForceExitChillModeRequestProto.getDefaultInstance();
-    try {
-      ForceExitChillModeResponseProto resp = rpcProxy
-          .forceExitChillMode(NULL_RPC_CONTROLLER, request);
-      return resp.getExitedChillMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-}
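
For reference, a minimal usage sketch of the translator removed above. This is a hedged example, not code from the patch: the construction of 'client' is assumed, and the ScmInfo getters are assumed to mirror the builder calls shown above.

    // 'client' is an already-built
    // StorageContainerLocationProtocolClientSideTranslatorPB (hypothetical).
    try {
      if (client.inChillMode()) {
        // Operators can force SCM out of chill mode when needed.
        boolean exited = client.forceExitChillMode();
        System.out.println("Forced chill mode exit: " + exited);
      }
      ScmInfo info = client.getScmInfo();
      System.out.println("clusterId=" + info.getClusterId()
          + ", scmId=" + info.getScmId());
    } finally {
      client.close(); // stops the underlying RPC proxy
    }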

+ 0 - 36
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .StorageContainerLocationProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface StorageContainerLocationProtocolPB
-    extends StorageContainerLocationProtocolService.BlockingInterface {
-}
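
The interface above carries only annotations; the actual proxy comes from Hadoop RPC. A sketch of the usual wiring follows, with a placeholder address (the host and port below are assumptions, not values from this patch):

    // Standard Hadoop RPC setup for a protobuf-based protocol.
    OzoneConfiguration conf = new OzoneConfiguration();
    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
        ProtobufRpcEngine.class);
    InetSocketAddress scmAddress =
        NetUtils.createSocketAddr("scm.example.com:9860"); // placeholder
    StorageContainerLocationProtocolPB proxy = RPC.getProxy(
        StorageContainerLocationProtocolPB.class,
        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class),
        scmAddress, conf);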

+ 0 - 24
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */

+ 0 - 432
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java

@@ -1,432 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .BlockNotCommittedException;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CloseContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .DatanodeBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.IOException;
-
-/**
- * Implementation of all container protocol calls performed by Container
- * clients.
- */
-public final class ContainerProtocolCalls {
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private ContainerProtocolCalls() {
-  }
-
-  /**
-   * Calls the container protocol to get a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param datanodeBlockID block ID identifying the block and its container
-   * @param traceID trace ID for tracking the call
-   * @return container protocol get block response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-      DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
-    GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(datanodeBlockID);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetBlock)
-        .setContainerID(datanodeBlockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setGetBlock(readBlockRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-
-    return response.getGetBlock();
-  }
-
-  /**
-   * Calls the container protocol to get the length of a committed block.
-   *
-   * @param xceiverClient client to perform call
-   * @param blockID blockId for the Block
-   * @param traceID trace ID for tracking the call
-   * @return container protocol getLastCommittedBlockLength response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ContainerProtos.GetCommittedBlockLengthResponseProto
-      getCommittedBlockLength(
-          XceiverClientSpi xceiverClient, BlockID blockID, String traceID)
-      throws IOException {
-    ContainerProtos.GetCommittedBlockLengthRequestProto.Builder
-        getBlockLengthRequestBuilder =
-        ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder().
-            setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.GetCommittedBlockLength)
-            .setContainerID(blockID.getContainerID())
-            .setTraceID(traceID)
-            .setDatanodeUuid(id)
-            .setGetCommittedBlockLength(getBlockLengthRequestBuilder).build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-    return response.getGetCommittedBlockLength();
-  }
-
-  /**
-   * Calls the container protocol to put a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param containerBlockData block data to identify container
-   * @param traceID trace ID for tracking the call
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void putBlock(XceiverClientSpi xceiverClient,
-      BlockData containerBlockData, String traceID) throws IOException {
-    PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto
-        .newBuilder()
-        .setBlockData(containerBlockData);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.PutBlock)
-        .setContainerID(containerBlockData.getBlockID().getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setPutBlock(createBlockRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Calls the container protocol to read a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to read
-   * @param blockID ID of the block
-   * @param traceID trace ID for tracking the call
-   * @return container protocol read chunk response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient,
-        ChunkInfo chunk, BlockID blockID, String traceID) throws IOException {
-    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.ReadChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setReadChunk(readChunkRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-    return response.getReadChunk();
-  }
-
-  /**
-   * Calls the container protocol to write a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to write
-   * @param blockID ID of the block
-   * @param data the data of the chunk to write
-   * @param traceID trace ID for tracking the call
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
-      BlockID blockID, ByteString data, String traceID)
-      throws IOException {
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.WriteChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setWriteChunk(writeChunkRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Writes a small file using a single RPC. It takes the block ID and the
-   * data, and sends both to the container in one call. This API is designed
-   * for files smaller than 1 MB.
-   *
-   * @param client - client that communicates with the container.
-   * @param blockID - ID of the block
-   * @param data - Data to be written into the container.
-   * @param traceID - Trace ID for logging purposes.
-   * @throws IOException
-   */
-  public static void writeSmallFile(XceiverClientSpi client,
-      BlockID blockID, byte[] data, String traceID)
-      throws IOException {
-
-    BlockData containerBlockData =
-        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .build();
-    PutBlockRequestProto.Builder createBlockRequest =
-        PutBlockRequestProto.newBuilder()
-            .setBlockData(containerBlockData);
-
-    KeyValue keyValue =
-        KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
-            .build();
-    ChunkInfo chunk =
-        ChunkInfo.newBuilder().setChunkName(blockID.getLocalID()
-            + "_chunk").setOffset(0).setLen(data.length).
-            addMetadata(keyValue).build();
-
-    PutSmallFileRequestProto putSmallFileRequest =
-        PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
-            .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
-            .build();
-
-    String id = client.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.PutSmallFile)
-            .setContainerID(blockID.getContainerID())
-            .setTraceID(traceID)
-            .setDatanodeUuid(id)
-            .setPutSmallFile(putSmallFileRequest)
-            .build();
-    ContainerCommandResponseProto response = client.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * createContainer call that creates a container on the datanode.
-   * @param client  - client
-   * @param containerID - ID of container
-   * @param traceID - traceID
-   * @throws IOException
-   */
-  public static void createContainer(XceiverClientSpi client, long containerID,
-      String traceID) throws IOException {
-    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
-        ContainerProtos.CreateContainerRequestProto
-            .newBuilder();
-    createRequest.setContainerType(ContainerProtos.ContainerType
-        .KeyValueContainer);
-
-    String id = client.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setContainerID(containerID);
-    request.setCreateContainer(createRequest.build());
-    request.setDatanodeUuid(id);
-    request.setTraceID(traceID);
-    ContainerCommandResponseProto response = client.sendCommand(
-        request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Deletes a container from a pipeline.
-   *
-   * @param client client to perform the call
-   * @param containerID ID of the container to delete
-   * @param force whether or not to forcibly delete the container.
-   * @param traceID trace ID for logging
-   * @throws IOException
-   */
-  public static void deleteContainer(XceiverClientSpi client, long containerID,
-      boolean force, String traceID) throws IOException {
-    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
-        ContainerProtos.DeleteContainerRequestProto.newBuilder();
-    deleteRequest.setForceDelete(force);
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteContainer);
-    request.setContainerID(containerID);
-    request.setDeleteContainer(deleteRequest);
-    request.setTraceID(traceID);
-    request.setDatanodeUuid(id);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Closes a container.
-   *
-   * @param client client to perform the call
-   * @param containerID ID of the container to close
-   * @param traceID trace ID for logging
-   * @throws IOException
-   */
-  public static void closeContainer(XceiverClientSpi client,
-      long containerID, String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
-    request.setTraceID(traceID);
-    request.setDatanodeUuid(id);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * readContainer call that gets metadata from an existing container.
-   *
-   * @param client - client
-   * @param containerID - ID of the container to read
-   * @param traceID - trace ID
-   * @return container protocol read container response
-   * @throws IOException
-   */
-  public static ReadContainerResponseProto readContainer(
-      XceiverClientSpi client, long containerID,
-      String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.ReadContainer);
-    request.setContainerID(containerID);
-    request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(id);
-    request.setTraceID(traceID);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-
-    return response.getReadContainer();
-  }
-
-  /**
-   * Reads the data given the blockID.
-   *
-   * @param client - client that communicates with the container
-   * @param blockID - ID of the block
-   * @param traceID - trace ID
-   * @return GetSmallFileResponseProto
-   * @throws IOException
-   */
-  public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
-      BlockID blockID, String traceID) throws IOException {
-    GetBlockRequestProto.Builder getBlock = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
-        GetSmallFileRequestProto
-            .newBuilder().setBlock(getBlock)
-            .build();
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetSmallFile)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setGetSmallFile(getSmallFileRequest)
-        .build();
-    ContainerCommandResponseProto response = client.sendCommand(request);
-    validateContainerResponse(response);
-
-    return response.getGetSmallFile();
-  }
-
-  /**
-   * Validates a response from a container protocol call.  Any non-successful
-   * return code is mapped to a corresponding exception and thrown.
-   *
-   * @param response container protocol call response
-   * @throws StorageContainerException if the container protocol call failed
-   */
-  private static void validateContainerResponse(
-      ContainerCommandResponseProto response
-  ) throws StorageContainerException {
-    if (response.getResult() == ContainerProtos.Result.SUCCESS) {
-      return;
-    } else if (response.getResult()
-        == ContainerProtos.Result.BLOCK_NOT_COMMITTED) {
-      throw new BlockNotCommittedException(response.getMessage());
-    }
-    throw new StorageContainerException(
-        response.getMessage(), response.getResult());
-  }
-}
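
A hedged sketch of how these static helpers were typically chained. The names 'xceiverClient', 'containerID' and 'localID' are assumed to exist, and the BlockID (containerID, localID) constructor is assumed per the removed hadoop-hdds client sources:

    String traceID = UUID.randomUUID().toString();
    // Create the container, then write and read back a sub-1MB payload.
    ContainerProtocolCalls.createContainer(xceiverClient, containerID, traceID);
    BlockID blockID = new BlockID(containerID, localID);
    byte[] payload = "small payload".getBytes(StandardCharsets.UTF_8);
    ContainerProtocolCalls.writeSmallFile(xceiverClient, blockID, payload,
        traceID);
    GetSmallFileResponseProto smallFile =
        ContainerProtocolCalls.readSmallFile(xceiverClient, blockID, traceID);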

+ 0 - 23
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * This package contains StorageContainerManager classes.
- */

+ 0 - 233
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java

@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone;
-
-import java.util.Objects;
-
-/**
- * The OzoneAcl class defines bucket ACLs used in Ozone.
- *
- * ACLs in Ozone follow this pattern.
- * <ul>
- * <li>user:name:rw
- * <li>group:name:rw
- * <li>world::rw
- * </ul>
- */
-public class OzoneAcl {
-  private OzoneACLType type;
-  private String name;
-  private OzoneACLRights rights;
-
-  /**
-   * Constructor for OzoneAcl.
-   */
-  public OzoneAcl() {
-  }
-
-  /**
-   * Constructor for OzoneAcl.
-   *
-   * @param type - Type
-   * @param name - Name of user
-   * @param rights - Rights
-   */
-  public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
-    this.name = name;
-    this.rights = rights;
-    this.type = type;
-    if (type == OzoneACLType.WORLD && name.length() != 0) {
-      throw new IllegalArgumentException("Unexpected name part in world type");
-    }
-    if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
-        && (name.length() == 0)) {
-      throw new IllegalArgumentException("User or group name is required");
-    }
-  }
-
-  /**
-   * Parses an ACL string and returns the ACL object.
-   *
-   * @param acl - Acl String , Ex. user:anu:rw
-   *
-   * @return - Ozone ACLs
-   */
-  public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
-    if ((acl == null) || acl.isEmpty()) {
-      throw new IllegalArgumentException("ACLs cannot be null or empty");
-    }
-    String[] parts = acl.trim().split(":");
-    if (parts.length < 3) {
-      throw new IllegalArgumentException("ACLs are not in expected format");
-    }
-
-    OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
-    OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
-
-    // TODO : Support sanitation of these user names by calling into
-    // userAuth Interface.
-    return new OzoneAcl(aclType, parts[1], rights);
-  }
-
-  @Override
-  public String toString() {
-    return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables.
-   *
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return Objects.hash(this.getName(), this.getRights().toString(),
-                        this.getType().toString());
-  }
-
-  /**
-   * Returns name.
-   *
-   * @return name
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns Rights.
-   *
-   * @return - Rights
-   */
-  public OzoneACLRights getRights() {
-    return rights;
-  }
-
-  /**
-   * Returns Type.
-   *
-   * @return type
-   */
-  public OzoneACLType getType() {
-    return type;
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    OzoneAcl otherAcl = (OzoneAcl) obj;
-    return otherAcl.getName().equals(this.getName()) &&
-        otherAcl.getRights() == this.getRights() &&
-        otherAcl.getType() == this.getType();
-  }
-
-  /**
-   * ACL types.
-   */
-  public enum OzoneACLType {
-    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
-    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
-    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
-
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    /**
-     * Initializes the OzoneACLType enum.
-     *
-     * @param val String type for this enum.
-     */
-    OzoneACLType(String val) {
-      value = val;
-    }
-  }
-
-  /**
-   * ACL rights.
-   */
-  public enum OzoneACLRights {
-    READ, WRITE, READ_WRITE;
-
-    /**
-     * Returns the ACL rights based on passed in String.
-     *
-     * @param type ACL right string
-     *
-     * @return OzoneACLRights
-     */
-    public static OzoneACLRights getACLRight(String type) {
-      if (type == null || type.isEmpty()) {
-        throw new IllegalArgumentException("ACL right cannot be empty");
-      }
-
-      switch (type) {
-      case OzoneConsts.OZONE_ACL_READ:
-        return OzoneACLRights.READ;
-      case OzoneConsts.OZONE_ACL_WRITE:
-        return OzoneACLRights.WRITE;
-      case OzoneConsts.OZONE_ACL_READ_WRITE:
-      case OzoneConsts.OZONE_ACL_WRITE_READ:
-        return OzoneACLRights.READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-
-    }
-
-    /**
-     * Returns String representation of ACL rights.
-     * @param acl OzoneACLRights
-     * @return String representation of acl
-     */
-    public static String getACLRightsString(OzoneACLRights acl) {
-      switch(acl) {
-      case READ:
-        return OzoneConsts.OZONE_ACL_READ;
-      case WRITE:
-        return OzoneConsts.OZONE_ACL_WRITE;
-      case READ_WRITE:
-        return OzoneConsts.OZONE_ACL_READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-    }
-
-  }
-
-}
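
Usage follows directly from the parser and toString shown above:

    OzoneAcl acl = OzoneAcl.parseAcl("user:anu:rw");
    // type == OzoneACLType.USER, name == "anu",
    // rights == OzoneACLRights.READ_WRITE
    System.out.println(acl); // prints "USER:anu:rw"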

+ 0 - 308
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java

@@ -1,308 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-
-import org.apache.ratis.shaded.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-/**
- * This class contains constants for configuration keys used in Ozone.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class OzoneConfigKeys {
-  public static final String OZONE_TAGS_SYSTEM_KEY =
-      "ozone.tags.system";
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
-  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
-
-  /**
-   * When set to true, allocate a random free port for ozone container,
-   * so that a mini cluster is able to launch multiple containers on a node.
-   *
-   * When set to false (default), container port is fixed as specified by
-   * DFS_CONTAINER_IPC_PORT_DEFAULT.
-   */
-  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
-  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
-      false;
-
-  /**
-   * Ratis Port where containers listen to.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
-  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
-
-  /**
-   * When set to true, allocate a random free port for ozone container, so that
-   * a mini cluster is able to launch multiple containers on a node.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
-  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
-      false;
-  public static final String OZONE_ENABLED =
-      "ozone.enabled";
-  public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_TRACE_ENABLED_KEY =
-      "ozone.trace.enabled";
-  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
-
-  public static final String OZONE_METADATA_DIRS =
-      "ozone.metadata.dirs";
-
-  public static final String OZONE_METADATA_STORE_IMPL =
-      "ozone.metastore.impl";
-  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
-      "LevelDB";
-  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
-      "RocksDB";
-  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
-      "ozone.metastore.rocksdb.statistics";
-
-  public static final String  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
-      "ALL";
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
-      "OFF";
-
-  public static final String OZONE_CONTAINER_CACHE_SIZE =
-      "ozone.container.cache.size";
-  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
-
-  public static final String OZONE_SCM_BLOCK_SIZE_IN_MB =
-      "ozone.scm.block.size.in.mb";
-  public static final long OZONE_SCM_BLOCK_SIZE_DEFAULT = 256;
-
-  /**
-   * Ozone administrator users delimited by comma.
-   * If not set, only the user who launches an ozone service will be the
-   * admin user. This property must be set if ozone services are started by
-   * different users. Otherwise the RPC layer will reject calls from
-   * services started by users not in the list.
-   */
-  public static final String OZONE_ADMINISTRATORS =
-      "ozone.administrators";
-
-  public static final String OZONE_CLIENT_PROTOCOL =
-      "ozone.client.protocol";
-
-  // This defines the overall connection limit for the connection pool used in
-  // RestClient.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
-      "ozone.rest.client.http.connection.max";
-  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
-
-  // This defines the connection limit per one HTTP route/host.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
-      "ozone.rest.client.http.connection.per-route.max";
-
-  public static final int
-      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
-
-  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
-      "ozone.client.socket.timeout";
-  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
-  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
-      "ozone.client.connection.timeout";
-  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
-
-  public static final String OZONE_REPLICATION = "ozone.replication";
-  public static final int OZONE_REPLICATION_DEFAULT =
-      ReplicationFactor.THREE.getValue();
-
-  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
-  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
-      ReplicationType.RATIS.toString();
-
-  /**
-   * Configuration property to configure the cache size of client list calls.
-   */
-  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
-      "ozone.client.list.cache";
-  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
-
-  /**
-   * Configuration properties for Ozone Block Deleting Service.
-   */
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
-      "ozone.block.deleting.service.interval";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
-      = "60s";
-
-  /**
-   * The interval of open key clean service.
-   */
-  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
-      "ozone.open.key.cleanup.service.interval.seconds";
-  public static final int
-      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
-      = 24 * 3600; // a total of 24 hours
-
-  /**
-   * An open key gets cleaned up when it is being in open state for too long.
-   */
-  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
-      "ozone.open.key.expire.threshold";
-  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
-      24 * 3600;
-
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
-      "ozone.block.deleting.service.timeout";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
-      = "300s"; // 300s for default
-
-  public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
-      "ozone.key.preallocation.maxsize";
-  public static final long OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT
-      = 128 * OzoneConsts.MB;
-
-  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
-      "ozone.block.deleting.limit.per.task";
-  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
-      = 1000;
-
-  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
-      = "ozone.block.deleting.container.limit.per.interval";
-  public static final int
-      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
-
-  public static final String OZONE_CLIENT_MAX_RETRIES =
-      "ozone.client.max.retries";
-  public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 50;
-
-  public static final String OZONE_CLIENT_RETRY_INTERVAL =
-      "ozone.client.retry.interval";
-  public static final String OZONE_CLIENT_RETRY_INTERVAL_DEFAULT = "200ms";
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
-  public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
-      = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
-  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY;
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT;
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT;
-
-  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.web.authentication.kerberos.principal";
-
-  public static final String HDDS_DATANODE_PLUGINS_KEY =
-      "hdds.datanode.plugins";
-
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD =
-      "hdds.datanode.storage.utilization.warning.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.95;
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD =
-      "hdds.datanode.storage.utilization.critical.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.75;
-
-  public static final String
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
-      "hdds.write.lock.reporting.threshold.ms";
-  public static final long
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
-  public static final String
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_KEY =
-      "hdds.lock.suppress.warning.interval.ms";
-  public static final long
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT = 10000L;
-
-  public static final String OZONE_CONTAINER_COPY_WORKDIR =
-      "hdds.datanode.replication.work.dir";
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private OzoneConfigKeys() {
-  }
-}
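
These keys were read through OzoneConfiguration (a Configuration subclass); a minimal sketch:

    OzoneConfiguration conf = new OzoneConfiguration();
    boolean ozoneEnabled = conf.getBoolean(
        OzoneConfigKeys.OZONE_ENABLED,
        OzoneConfigKeys.OZONE_ENABLED_DEFAULT);      // false unless configured
    int maxRetries = conf.getInt(
        OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES,
        OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES_DEFAULT); // 50 by default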

+ 0 - 216
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

@@ -1,216 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Set of constants used in Ozone implementation.
- */
-@InterfaceAudience.Private
-public final class OzoneConsts {
-
-
-  public static final String STORAGE_DIR = "scm";
-  public static final String SCM_ID = "scmUuid";
-
-  public static final String OZONE_SIMPLE_ROOT_USER = "root";
-  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
-
-  public static final String STORAGE_ID = "storageID";
-  public static final String DATANODE_UUID = "datanodeUuid";
-  public static final String CLUSTER_ID = "clusterID";
-  public static final String LAYOUTVERSION = "layOutVersion";
-  public static final String CTIME = "ctime";
-  /*
-   * The same name-length limits apply to both bucket and volume names.
-   */
-  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
-  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
-
-  public static final String OZONE_ACL_USER_TYPE = "user";
-  public static final String OZONE_ACL_GROUP_TYPE = "group";
-  public static final String OZONE_ACL_WORLD_TYPE = "world";
-
-  public static final String OZONE_ACL_READ = "r";
-  public static final String OZONE_ACL_WRITE = "w";
-  public static final String OZONE_ACL_READ_WRITE = "rw";
-  public static final String OZONE_ACL_WRITE_READ = "wr";
-
-  public static final String OZONE_DATE_FORMAT =
-      "EEE, dd MMM yyyy HH:mm:ss zzz";
-  public static final String OZONE_TIME_ZONE = "GMT";
-
-  public static final String OZONE_COMPONENT = "component";
-  public static final String OZONE_FUNCTION  = "function";
-  public static final String OZONE_RESOURCE = "resource";
-  public static final String OZONE_USER = "user";
-  public static final String OZONE_REQUEST = "request";
-
-  public static final String OZONE_URI_SCHEME = "o3";
-  public static final String OZONE_HTTP_SCHEME = "http";
-  public static final String OZONE_URI_DELIMITER = "/";
-
-  public static final String CONTAINER_EXTENSION = ".container";
-  public static final String CONTAINER_META = ".meta";
-
-  // Refer to {@link ContainerReader} for container storage layout on disk.
-  public static final String CONTAINER_PREFIX  = "containers";
-  public static final String CONTAINER_META_PATH = "metadata";
-  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
-  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
-  public static final String CONTAINER_ROOT_PREFIX = "repository";
-
-  public static final String FILE_HASH = "SHA-256";
-  public static final String CHUNK_OVERWRITE = "OverWriteRequested";
-
-  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
-  public static final long KB = 1024L;
-  public static final long MB = KB * 1024L;
-  public static final long GB = MB * 1024L;
-  public static final long TB = GB * 1024L;
-
-  /**
-   * LevelDB names used by SCM and datanodes.
-   */
-  public static final String CONTAINER_DB_SUFFIX = "container.db";
-  public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
-  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
-  public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
-  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
-  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String OM_DB_NAME = "om.db";
-
-  public static final String STORAGE_DIR_CHUNKS = "chunks";
-
-  /**
-   * Supports Bucket Versioning.
-   */
-  public enum Versioning {
-    NOT_DEFINED, ENABLED, DISABLED;
-
-    public static Versioning getVersioning(boolean versioning) {
-      return versioning ? ENABLED : DISABLED;
-    }
-  }
-
-  public static final String DELETING_KEY_PREFIX = "#deleting#";
-  public static final String DELETED_KEY_PREFIX = "#deleted#";
-  public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-
-  /**
-   * OM LevelDB prefixes.
-   *
-   * OM DB stores metadata as KV pairs with certain prefixes,
-   * prefix is used to improve the performance to get related
-   * metadata.
-   *
-   * OM DB Schema:
-   *  ----------------------------------------------------------
-   *  |  KEY                                     |     VALUE   |
-   *  ----------------------------------------------------------
-   *  | $userName                                |  VolumeList |
-   *  ----------------------------------------------------------
-   *  | /#volumeName                             |  VolumeInfo |
-   *  ----------------------------------------------------------
-   *  | /#volumeName/#bucketName                 |  BucketInfo |
-   *  ----------------------------------------------------------
-   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
-   *  ----------------------------------------------------------
-   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
-   *  ----------------------------------------------------------
-   */
-
-  public static final String OM_KEY_PREFIX = "/";
-  public static final String OM_USER_PREFIX = "$";
-
-  /**
-   * Max OM Quota size of 1024 PB.
-   */
-  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
-
-  /**
-   * Max number of keys returned per list buckets operation.
-   */
-  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
-
-  /**
-   * Max number of keys returned per list keys operation.
-   */
-  public static final int MAX_LISTKEYS_SIZE  = 1024;
-
-  /**
-   * Max number of volumes returned per list volumes operation.
-   */
-  public static final int MAX_LISTVOLUMES_SIZE = 1024;
-
-  public static final int INVALID_PORT = -1;
-
-
-  // The ServiceListJSONServlet context attribute where OzoneManager
-  // instance gets stored.
-  public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
-
-  private OzoneConsts() {
-    // Never Constructed
-  }
-
-  // YAML fields for .container files
-  public static final String CONTAINER_ID = "containerID";
-  public static final String CONTAINER_TYPE = "containerType";
-  public static final String STATE = "state";
-  public static final String METADATA = "metadata";
-  public static final String MAX_SIZE = "maxSize";
-  public static final String METADATA_PATH = "metadataPath";
-  public static final String CHUNKS_PATH = "chunksPath";
-  public static final String CONTAINER_DB_TYPE = "containerDBType";
-  public static final String CHECKSUM = "checksum";
-
-  // For OM Audit usage
-  public static final String VOLUME = "volume";
-  public static final String BUCKET = "bucket";
-  public static final String KEY = "key";
-  public static final String QUOTA = "quota";
-  public static final String QUOTA_IN_BYTES = "quotaInBytes";
-  public static final String CLIENT_ID = "clientID";
-  public static final String OWNER = "owner";
-  public static final String ADMIN = "admin";
-  public static final String USERNAME = "username";
-  public static final String PREV_KEY = "prevKey";
-  public static final String START_KEY = "startKey";
-  public static final String MAX_KEYS = "maxKeys";
-  public static final String PREFIX = "prefix";
-  public static final String KEY_PREFIX = "keyPrefix";
-  public static final String ACLS = "acls";
-  public static final String USER_ACL = "userAcl";
-  public static final String ADD_ACLS = "addAcls";
-  public static final String REMOVE_ACLS = "removeAcls";
-  public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
-  public static final String TO_KEY_NAME = "toKeyName";
-  public static final String STORAGE_TYPE = "storageType";
-  public static final String IS_VERSION_ENABLED = "isVersionEnabled";
-  public static final String CREATION_TIME = "creationTime";
-  public static final String DATA_SIZE = "dataSize";
-  public static final String REPLICATION_TYPE = "replicationType";
-  public static final String REPLICATION_FACTOR = "replicationFactor";
-  public static final String KEY_LOCATION_INFO = "keyLocationInfo";
-
-
-
-}
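
The OM DB schema documented above composes keys from these constants. An illustrative sketch (the '#' markers in the schema comment are documentation shorthand, and the names below are made up):

    String userKey   = OzoneConsts.OM_USER_PREFIX + "hadoop";   // "$hadoop"
    String volumeKey = OzoneConsts.OM_KEY_PREFIX + "vol1";      // "/vol1"
    String bucketKey = volumeKey + OzoneConsts.OM_KEY_PREFIX + "bucket1";
    String keyEntry  = bucketKey + OzoneConsts.OM_KEY_PREFIX + "key1";
    // "#deleting#/vol1/bucket1/key1"
    String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + keyEntry;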

+ 0 - 30
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java

@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Interface to define AuditAction.
- */
-public interface AuditAction {
-  /**
-   * Implementations must return the action represented by this instance.
-   * @return the audit action as a String
-   */
-  String getAction();
-}
-
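
A hypothetical implementation of the interface (the real OM/SCM/DN action enums live elsewhere in the tree and are not part of this excerpt):

    public enum DemoAction implements AuditAction {
      CREATE_VOLUME,
      DELETE_VOLUME;

      @Override
      public String getAction() {
        return name(); // use the enum constant name as the audit action
      }
    }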

+ 0 - 36
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define AuditEventStatus values.
- */
-public enum AuditEventStatus {
-  SUCCESS("SUCCESS"),
-  FAILURE("FAILURE");
-
-  private String status;
-
-  AuditEventStatus(String status){
-    this.status = status;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-}

+ 0 - 76
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java

@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.spi.ExtendedLogger;
-
-
-/**
- * Class to define Audit Logger for Ozone.
- */
-public class AuditLogger {
-
-  private ExtendedLogger logger;
-  private static final String FQCN = AuditLogger.class.getName();
-  private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
-  private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
-
-  /**
-   * Parameterized constructor to initialize the logger.
-   * @param type Audit Logger Type
-   */
-  public AuditLogger(AuditLoggerType type){
-    initializeLogger(type);
-  }
-
-  /**
-   * Initializes the logger with the specified type.
-   * @param loggerType one of the values from the AuditLoggerType enum.
-   */
-  private void initializeLogger(AuditLoggerType loggerType){
-    this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
-  }
-
-  @VisibleForTesting
-  public ExtendedLogger getLogger() {
-    return logger;
-  }
-
-  public void logWriteSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null);
-  }
-
-  public void logWriteFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg,
-        msg.getThrowable());
-  }
-
-  public void logReadSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null);
-  }
-
-  public void logReadFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg,
-        msg.getThrowable());
-  }
-
-}
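
For reference, a minimal sketch of how this removed logger was typically driven; the AuditMessage builder appears in the next removed file, and the log4j2 appender wiring is assumed to be configured separately:

    import org.apache.hadoop.ozone.audit.AuditLogger;
    import org.apache.hadoop.ozone.audit.AuditLoggerType;
    import org.apache.hadoop.ozone.audit.AuditMessage;

    public class AuditLoggerSketch {
      // Binds to the "OMAudit" logger name resolved via AuditLoggerType.
      private static final AuditLogger AUDIT =
          new AuditLogger(AuditLoggerType.OMLOGGER);

      static void record(AuditMessage success, AuditMessage failure) {
        AUDIT.logWriteSuccess(success);  // INFO, tagged with the WRITE marker
        AUDIT.logWriteFailure(failure);  // ERROR + WRITE marker, with throwable
        AUDIT.logReadSuccess(success);   // INFO, tagged with the READ marker
      }
    }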

+ 0 - 37
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java

@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enumeration for defining types of Audit Loggers in Ozone.
- */
-public enum AuditLoggerType {
-  DNLOGGER("DNAudit"),
-  OMLOGGER("OMAudit"),
-  SCMLOGGER("SCMAudit");
-
-  private String type;
-
-  public String getType() {
-    return type;
-  }
-
-  AuditLoggerType(String type){
-    this.type = type;
-  }
-}

+ 0 - 38
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java

@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.MarkerManager;
-
-/**
- * Defines audit marker types.
- */
-public enum AuditMarker {
-  WRITE(MarkerManager.getMarker("WRITE")),
-  READ(MarkerManager.getMarker("READ"));
-
-  private Marker marker;
-
-  AuditMarker(Marker marker){
-    this.marker = marker;
-  }
-
-  public Marker getMarker(){
-    return marker;
-  }
-}

+ 0 - 131
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java

@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.message.Message;
-
-import java.util.Map;
-
-/**
- * Defines audit message structure.
- */
-public class AuditMessage implements Message {
-
-  private String message;
-  private Throwable throwable;
-
-  private static final String MSG_PATTERN =
-      "user=%s | ip=%s | op=%s %s | ret=%s";
-
-  public AuditMessage(){
-
-  }
-
-  @Override
-  public String getFormattedMessage() {
-    return message;
-  }
-
-  @Override
-  public String getFormat() {
-    return null;
-  }
-
-  @Override
-  public Object[] getParameters() {
-    return new Object[0];
-  }
-
-  @Override
-  public Throwable getThrowable() {
-    return throwable;
-  }
-
-  /**
-   * Use when a custom string needs to be appended to the default message.
-   * @param customMessage custom string
-   */
-  private void appendMessage(String customMessage) {
-    this.message += customMessage;
-  }
-
-  public String getMessage() {
-    return message;
-  }
-
-  public void setMessage(String message) {
-    this.message = message;
-  }
-
-  public void setThrowable(Throwable throwable) {
-    this.throwable = throwable;
-  }
-
-  /**
-   * Builder class for AuditMessage.
-   */
-  public static class Builder {
-    private Throwable throwable;
-    private String user;
-    private String ip;
-    private String op;
-    private Map<String, String> params;
-    private String ret;
-
-    public Builder(){
-
-    }
-
-    public Builder setUser(String usr){
-      this.user = usr;
-      return this;
-    }
-
-    public Builder atIp(String ipAddr){
-      this.ip = ipAddr;
-      return this;
-    }
-
-    public Builder forOperation(String operation){
-      this.op = operation;
-      return this;
-    }
-
-    public Builder withParams(Map<String, String> args){
-      this.params = args;
-      return this;
-    }
-
-    public Builder withResult(String result){
-      this.ret = result;
-      return this;
-    }
-
-    public Builder withException(Throwable ex){
-      this.throwable = ex;
-      return this;
-    }
-
-    public AuditMessage build(){
-      AuditMessage auditMessage = new AuditMessage();
-      auditMessage.message = String.format(MSG_PATTERN,
-          this.user, this.ip, this.op, this.params, this.ret);
-      auditMessage.throwable = this.throwable;
-      return auditMessage;
-    }
-  }
-}
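
Given the MSG_PATTERN above, a brief sketch of the builder in use (all values are illustrative):

    import java.util.Collections;

    import org.apache.hadoop.ozone.audit.AuditMessage;

    public class AuditMessageSketch {
      static AuditMessage sample() {
        // Formats as:
        // user=hadoop | ip=127.0.0.1 | op=CREATE_VOLUME {volume=vol1} | ret=SUCCESS
        return new AuditMessage.Builder()
            .setUser("hadoop")
            .atIp("127.0.0.1")
            .forOperation("CREATE_VOLUME")
            .withParams(Collections.singletonMap("volume", "vol1"))
            .withResult("SUCCESS")
            .build();
      }
    }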

+ 0 - 32
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java

@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to make an entity auditable.
- */
-public interface Auditable {
-  /**
-   * Must override in implementation.
-   * @return Map<String, String> with values to be logged in audit.
-   */
-  Map<String, String> toAuditMap();
-}
-

+ 0 - 33
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java

@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to mark an actor as Auditor.
- */
-public interface Auditor {
-
-  AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap);
-
-  AuditMessage buildAuditMessageForFailure(
-      AuditAction op, Map<String, String> auditMap, Throwable throwable);
-
-}

+ 0 - 138
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java

@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-/**
- ******************************************************************************
- *                              Important
- * 1. Any changes to classes in this package can render the logging
- * framework broken.
- * 2. The logger framework has been designed keeping in mind future
- * plans to build a log parser.
- * 3. Please exercise great caution when attempting changes in this package.
- ******************************************************************************
- *
- *
- * This package lays the foundation for Audit logging in Ozone.
- * Audit logging in Ozone is built on log4j2, which brings in new features:
- * selective audit events can be turned on/off using a MarkerFilter, the
- * logging configuration is checked periodically for changes and reloaded,
- * and the disruptor framework is used for improved asynchronous logging.
- *
- * The log4j2 configuration can be specified in XML, YAML, JSON or a
- * Properties file. For Ozone, we use the Properties file for its sheer
- * simplicity, readability and ease of modification.
- *
- * The log4j2 configuration file is passed to the startup command with the
- * option -Dlog4j.configurationFile (unlike -Dlog4j.configuration in log4j 1.x).
- *
- ******************************************************************************
- *          Understanding the Audit Logging framework in Ozone.
- ******************************************************************************
- * *** Auditable ***
- * This is an interface to mark an entity as auditable.
- * This interface must be implemented by entities requiring audit logging.
- * For example - OMVolumeArgs, OMBucketArgs.
- * The implementing class must override toAuditMap() to return an
- * instance of Map<Key, Value> where both Key and Value are String.
- *
- * Key: must contain printable US ASCII characters
- * May not contain a space, =, ], or "
- * If the key is multi word then use camel case.
- *
- * Value: if it is a collection/array, then it must be converted to a comma
- * delimited string
- *
- * *** AuditAction ***
- * This is an interface to define the various type of actions to be audited.
- * To ensure separation of concern, for each sub-component you must create an
- * Enum to implement AuditAction.
- * Refer to the test class DummyAction for the structure of such an enum.
- *
- * For starters, we expect following 3 implementations of AuditAction:
- * OMAction - to define action types for Ozone Manager
- * SCMAction - to define action types for Storage Container Manager
- * DNAction - to define action types for Datanode
- *
- * *** AuditEventStatus ***
- * Enum to define Audit event status like success and failure.
- * This is used in AuditLogger.logXXX() methods.
- *
- * *** AuditLogger ***
- * This is where the audit logging magic unfolds.
- * The class has 2 Markers defined - READ and WRITE.
- * These markers are used to tag when logging events.
- *
- * *** AuditLoggerType ***
- * Enum to define the various AuditLoggers in Ozone
- *
- * *** AuditMarker ***
- * Enum to define various Audit Markers used in AuditLogging.
- *
- * *** AuditMessage ***
- * Entity to define an audit message to be logged
- * It will generate a message formatted as:
- * user=xxx | ip=xxx | op=XXXX_XXXX {key=val, key1=val1..} | ret=XXXXXX
- *
- * *** Auditor ***
- * Interface to mark an actor class as an Auditor.
- * It must be implemented by classes where we want to log audit events, and
- * the implementing class must override and implement the methods
- * buildAuditMessageForSuccess and buildAuditMessageForFailure.
- *
- * ****************************************************************************
- *                              Usage
- * ****************************************************************************
- * Using the AuditLogger to log events:
- * 1. Get a logger by specifying the appropriate logger type
- * Example: AuditLogger AUDIT = new AuditLogger(AuditLoggerType.OMLOGGER)
- *
- * 2. Construct an instance of AuditMessage
- *
- * 3. Log Read/Write and Success/Failure event as needed.
- * Example
- * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params))
- *
- * 4. Log Level implicitly defaults to INFO for xxxxSuccess() and ERROR for
- * xxxxFailure()
- * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params))
- * AUDIT.logWriteFailure(buildAuditMessageForFailure(params))
- *
- * See sample invocations in src/test in the following class:
- * org.apache.hadoop.ozone.audit.TestOzoneAuditLogger
- *
- * ****************************************************************************
- *                      Defining new Logger types
- * ****************************************************************************
- * A new Logger type can be added with the following steps:
- * 1. Update AuditLoggerType to add the new type
- * 2. Create new Enum by implementing AuditAction if needed
- * 3. Ensure the required entity implements Auditable
- *
- * ****************************************************************************
- *                      Defining new Marker types
- * ****************************************************************************
- * New Markers can be configured as follows:
- * 1. Define new markers in AuditMarker
- * 2. Get the Marker in AuditLogger for use in the log methods, example:
- * private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
- * 3. Define log methods in AuditLogger to use the new Marker type
- * 4. Call these new methods from the required classes to audit with these
- * new markers
- * 5. The marker based filtering can be configured in log4j2 configurations
- * Refer log4j2.properties in src/test/resources for a sample.
- */
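
Putting the pieces together, a hedged sketch of an Auditor implementation along the lines the notes above describe. AuditAction is not part of this diff, so its getAction() accessor is an assumption here:

    import java.util.Map;

    import org.apache.hadoop.ozone.audit.AuditAction;
    import org.apache.hadoop.ozone.audit.AuditEventStatus;
    import org.apache.hadoop.ozone.audit.AuditMessage;
    import org.apache.hadoop.ozone.audit.Auditor;

    public class SampleAuditor implements Auditor {

      @Override
      public AuditMessage buildAuditMessageForSuccess(
          AuditAction op, Map<String, String> auditMap) {
        return new AuditMessage.Builder()
            .setUser("hadoop")                  // illustrative caller identity
            .atIp("127.0.0.1")
            .forOperation(op.getAction())       // getAction() is assumed
            .withParams(auditMap)
            .withResult(AuditEventStatus.SUCCESS.getStatus())
            .build();
      }

      @Override
      public AuditMessage buildAuditMessageForFailure(
          AuditAction op, Map<String, String> auditMap, Throwable throwable) {
        return new AuditMessage.Builder()
            .setUser("hadoop")
            .atIp("127.0.0.1")
            .forOperation(op.getAction())
            .withParams(auditMap)
            .withResult(AuditEventStatus.FAILURE.getStatus())
            .withException(throwable)
            .build();
      }
    }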

+ 0 - 97
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java

@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .KeyBlocks;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A group of related blocks, e.g. blocks that belong to a certain object key.
- */
-public final class BlockGroup {
-
-  private String groupID;
-  private List<BlockID> blockIDs;
-  private BlockGroup(String groupID, List<BlockID> blockIDs) {
-    this.groupID = groupID;
-    this.blockIDs = blockIDs;
-  }
-
-  public List<BlockID> getBlockIDList() {
-    return blockIDs;
-  }
-
-  public String getGroupID() {
-    return groupID;
-  }
-
-  public KeyBlocks getProto() {
-    KeyBlocks.Builder kbb = KeyBlocks.newBuilder();
-    for (BlockID block : blockIDs) {
-      kbb.addBlocks(block.getProtobuf());
-    }
-    return kbb.setKey(groupID).build();
-  }
-
-  /**
-   * Parses a KeyBlocks proto to a group of blocks.
-   * @param proto KeyBlocks proto.
-   * @return a group of blocks.
-   */
-  public static BlockGroup getFromProto(KeyBlocks proto) {
-    List<BlockID> blockIDs = new ArrayList<>();
-    for (HddsProtos.BlockID block : proto.getBlocksList()) {
-      blockIDs.add(new BlockID(block.getContainerID(), block.getLocalID()));
-    }
-    return BlockGroup.newBuilder().setKeyName(proto.getKey())
-        .addAllBlockIDs(blockIDs).build();
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * BlockGroup instance builder.
-   */
-  public static class Builder {
-
-    private String groupID;
-    private List<BlockID> blockIDs;
-
-    public Builder setKeyName(String blockGroupID) {
-      this.groupID = blockGroupID;
-      return this;
-    }
-
-    public Builder addAllBlockIDs(List<BlockID> keyBlocks) {
-      this.blockIDs = keyBlocks;
-      return this;
-    }
-
-    public BlockGroup build() {
-      return new BlockGroup(groupID, blockIDs);
-    }
-  }
-
-}
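
A short sketch of the builder and the proto round trip defined above (container and local IDs are illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
    import org.apache.hadoop.ozone.common.BlockGroup;

    public class BlockGroupSketch {
      static void roundTrip() {
        BlockGroup group = BlockGroup.newBuilder()
            .setKeyName("/vol1/bucket1/key1")
            .addAllBlockIDs(Arrays.asList(
                new BlockID(1L, 100L), new BlockID(1L, 101L)))
            .build();

        KeyBlocks proto = group.getProto();                 // to the wire format
        BlockGroup parsed = BlockGroup.getFromProto(proto); // and back again
      }
    }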

+ 0 - 97
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java

@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmBlockResult;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmBlockResult.Result;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Result to delete a group of blocks.
- */
-public class DeleteBlockGroupResult {
-  private String objectKey;
-  private List<DeleteBlockResult> blockResultList;
-  public DeleteBlockGroupResult(String objectKey,
-      List<DeleteBlockResult> blockResultList) {
-    this.objectKey = objectKey;
-    this.blockResultList = blockResultList;
-  }
-
-  public String getObjectKey() {
-    return objectKey;
-  }
-
-  public List<DeleteBlockResult> getBlockResultList() {
-    return blockResultList;
-  }
-
-  public List<DeleteScmBlockResult> getBlockResultProtoList() {
-    List<DeleteScmBlockResult> resultProtoList =
-        new ArrayList<>(blockResultList.size());
-    for (DeleteBlockResult result : blockResultList) {
-      DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder()
-          .setBlockID(result.getBlockID().getProtobuf())
-          .setResult(result.getResult()).build();
-      resultProtoList.add(proto);
-    }
-    return resultProtoList;
-  }
-
-  public static List<DeleteBlockResult> convertBlockResultProto(
-      List<DeleteScmBlockResult> results) {
-    List<DeleteBlockResult> protoResults = new ArrayList<>(results.size());
-    for (DeleteScmBlockResult result : results) {
-      protoResults.add(new DeleteBlockResult(BlockID.getFromProtobuf(
-          result.getBlockID()), result.getResult()));
-    }
-    return protoResults;
-  }
-
-  /**
-   * This group is considered successfully executed only if all of its
-   * blocks are successfully deleted.
-   *
-   * @return true if all blocks are successfully deleted, false otherwise.
-   */
-  public boolean isSuccess() {
-    for (DeleteBlockResult result : blockResultList) {
-      if (result.getResult() != Result.success) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * @return A list of deletion failed block IDs.
-   */
-  public List<BlockID> getFailedBlocks() {
-    List<BlockID> failedBlocks = blockResultList.stream()
-        .filter(result -> result.getResult() != Result.success)
-        .map(DeleteBlockResult::getBlockID).collect(Collectors.toList());
-    return failedBlocks;
-  }
-}
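
A sketch of how a caller might act on this result; 'result' is assumed to come back from an SCM block-deletion call:

    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;

    public class DeleteResultSketch {
      // Returns the blocks worth retrying; empty when the whole group succeeded.
      static List<BlockID> retryCandidates(DeleteBlockGroupResult result) {
        return result.isSuccess()
            ? Collections.emptyList()
            : result.getFailedBlocks();
      }
    }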

+ 0 - 51
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java

@@ -1,51 +0,0 @@
-/**
-  * Licensed to the Apache Software Foundation (ASF) under one
-  * or more contributor license agreements.  See the NOTICE file
-  * distributed with this work for additional information
-  * regarding copyright ownership.  The ASF licenses this file
-  * to you under the Apache License, Version 2.0 (the
-  * "License"); you may not use this file except in compliance
-  * with the License.  You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * The exception is thrown when file system state is inconsistent
- * and is not recoverable.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class InconsistentStorageStateException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public InconsistentStorageStateException(String descr) {
-    super(descr);
-  }
-
-  public InconsistentStorageStateException(File dir, String descr) {
-    super("Directory " + getFilePath(dir) + " is in an inconsistent state: "
-        + descr);
-  }
-
-  private static String getFilePath(File dir) {
-    try {
-      return dir.getCanonicalPath();
-    } catch (IOException e) {
-      // ignore and fall back to the non-canonical path below
-    }
-    return dir.getPath();
-  }
-}

+ 0 - 249
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java

@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Properties;
-
-/**
- * Storage information file. This class defines the methods to check
- * the consistency of the storage dir and the version file.
- * <p>
- * Local storage information is stored in a separate file VERSION.
- * It contains the type of the node,
- * the storage layout version, the SCM id, and
- * the OM/SCM state creation time.
- *
- */
-@InterfaceAudience.Private
-public abstract class Storage {
-  private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
-
-  public static final String STORAGE_DIR_CURRENT = "current";
-  protected static final String STORAGE_FILE_VERSION = "VERSION";
-  public static final String CONTAINER_DIR = "containerDir";
-
-  private final NodeType nodeType;
-  private final File root;
-  private final File storageDir;
-
-  private StorageState state;
-  private StorageInfo storageInfo;
-
-
-  /**
-   * Determines the state of the Version file.
-   */
-  public enum StorageState {
-    NON_EXISTENT, NOT_INITIALIZED, INITIALIZED
-  }
-
-  public Storage(NodeType type, File root, String sdName)
-      throws IOException {
-    this.nodeType = type;
-    this.root = root;
-    this.storageDir = new File(root, sdName);
-    this.state = getStorageState();
-    if (state == StorageState.INITIALIZED) {
-      this.storageInfo = new StorageInfo(type, getVersionFile());
-    } else {
-      this.storageInfo = new StorageInfo(
-          nodeType, StorageInfo.newClusterID(), Time.now());
-      setNodeProperties();
-    }
-  }
-
-  /**
-   * Gets the path of the Storage dir.
-   * @return storage dir path
-   */
-  public String getStorageDir() {
-    return storageDir.getAbsoluteFile().toString();
-  }
-
-  /**
-   * Gets the state of the version file.
-   * @return the state of the Version file
-   */
-  public StorageState getState() {
-    return state;
-  }
-
-  public NodeType getNodeType() {
-    return storageInfo.getNodeType();
-  }
-
-  public String getClusterID() {
-    return storageInfo.getClusterID();
-  }
-
-  public long getCreationTime() {
-    return storageInfo.getCreationTime();
-  }
-
-  public void setClusterId(String clusterId) throws IOException {
-    if (state == StorageState.INITIALIZED) {
-      throw new IOException(
-          "Storage directory " + storageDir + " already initialized.");
-    } else {
-      storageInfo.setClusterId(clusterId);
-    }
-  }
-
-  /**
-   * Retrieves the storageInfo instance to read/write the common
-   * version file properties.
-   * @return the instance of the storageInfo class
-   */
-  protected StorageInfo getStorageInfo() {
-    return storageInfo;
-  }
-
-  protected abstract Properties getNodeProperties();
-
-  /**
-   * Sets the node properties specific to OM/SCM.
-   */
-  private void setNodeProperties() {
-    Properties nodeProperties = getNodeProperties();
-    if (nodeProperties != null) {
-      for (String key : nodeProperties.stringPropertyNames()) {
-        storageInfo.setProperty(key, nodeProperties.getProperty(key));
-      }
-    }
-  }
-
-  /**
-   * Directory {@code current} contains latest files defining
-   * the file system meta-data.
-   *
-   * @return the directory path
-   */
-  private File getCurrentDir() {
-    return new File(storageDir, STORAGE_DIR_CURRENT);
-  }
-
-  /**
-   * File {@code VERSION} contains the following fields:
-   * <ol>
-   * <li>node type</li>
-   * <li>OM/SCM state creation time</li>
-   * <li>other fields specific for this node type</li>
-   * </ol>
-   * The version file is always written last during storage directory updates.
-   * The existence of the version file indicates that all other files have
-   * been successfully written in the storage directory, the storage is valid
-   * and does not need to be recovered.
-   *
-   * @return the version file path
-   */
-  private File getVersionFile() {
-    return new File(getCurrentDir(), STORAGE_FILE_VERSION);
-  }
-
-
-  /**
-   * Check to see if current/ directory is empty. This method is used
-   * before determining to format the directory.
-   * @throws IOException if unable to list files under the directory.
-   */
-  private void checkEmptyCurrent() throws IOException {
-    File currentDir = getCurrentDir();
-    if (!currentDir.exists()) {
-      // if current/ does not exist, it's safe to format it.
-      return;
-    }
-    try (DirectoryStream<Path> dirStream = Files
-        .newDirectoryStream(currentDir.toPath())) {
-      if (dirStream.iterator().hasNext()) {
-        throw new InconsistentStorageStateException(getCurrentDir(),
-            "Can't initialize the storage directory because the current "
-                + "it is not empty.");
-      }
-    }
-  }
-
-  /**
-   * Check consistency of the storage directory.
-   *
-   * @return state {@link StorageState} of the storage directory
-   * @throws IOException
-   */
-  private StorageState getStorageState() throws IOException {
-    assert root != null : "root is null";
-    String rootPath = root.getCanonicalPath();
-    try { // check that storage exists
-      if (!root.exists()) {
-        // storage directory does not exist
-        LOG.warn("Storage directory " + rootPath + " does not exist");
-        return StorageState.NON_EXISTENT;
-      }
-      // or is inaccessible
-      if (!root.isDirectory()) {
-        LOG.warn(rootPath + "is not a directory");
-        return StorageState.NON_EXISTENT;
-      }
-      if (!FileUtil.canWrite(root)) {
-        LOG.warn("Cannot access storage directory " + rootPath);
-        return StorageState.NON_EXISTENT;
-      }
-    } catch (SecurityException ex) {
-      LOG.warn("Cannot access storage directory " + rootPath, ex);
-      return StorageState.NON_EXISTENT;
-    }
-
-    // check whether current directory is valid
-    File versionFile = getVersionFile();
-    boolean hasCurrent = versionFile.exists();
-
-    if (hasCurrent) {
-      return StorageState.INITIALIZED;
-    } else {
-      checkEmptyCurrent();
-      return StorageState.NOT_INITIALIZED;
-    }
-  }
-
-  /**
-   * Creates the Version file if not present,
-   * otherwise returns with IOException.
-   * @throws IOException
-   */
-  public void initialize() throws IOException {
-    if (state == StorageState.INITIALIZED) {
-      throw new IOException("Storage directory already initialized.");
-    }
-    if (!getCurrentDir().mkdirs()) {
-      throw new IOException("Cannot create directory " + getCurrentDir());
-    }
-    storageInfo.writeTo(getVersionFile());
-  }
-
-}
-
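
Since Storage is abstract, each service supplies its own node-specific properties. A minimal sketch of a concrete subclass, where the "scm" subdirectory name and the layoutVersion property are hypothetical:

    import java.io.File;
    import java.io.IOException;
    import java.util.Properties;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
    import org.apache.hadoop.ozone.common.Storage;

    public class SampleScmStorage extends Storage {

      public SampleScmStorage(File root) throws IOException {
        super(NodeType.SCM, root, "scm");
      }

      @Override
      protected Properties getNodeProperties() {
        Properties props = new Properties();
        props.setProperty("layoutVersion", "1"); // hypothetical node-specific field
        return props;
      }
    }

On a first start, new SampleScmStorage(root).initialize() creates <root>/scm/current/VERSION and writes the properties into it.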

+ 0 - 183
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java

@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * Common class for storage information. This class defines the common
- * properties and functions to set them, write them into the version file
- * and read them from the version file.
- *
- */
-@InterfaceAudience.Private
-public class StorageInfo {
-
-  private Properties properties = new Properties();
-
-  /**
-   * Property to hold node type.
-   */
-  private static final String NODE_TYPE = "nodeType";
-  /**
-   * Property to hold ID of the cluster.
-   */
-  private static final String CLUSTER_ID = "clusterID";
-  /**
-   * Property to hold creation time of the storage.
-   */
-  private static final String CREATION_TIME = "cTime";
-
-  /**
-   * Constructs StorageInfo instance.
-   * @param type
-   *          Type of the node using the storage
-   * @param cid
-   *          Cluster ID
-   * @param cT
-   *          Cluster creation Time
-
-   * @throws IOException
-   */
-  public StorageInfo(NodeType type, String cid, long cT)
-      throws IOException {
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(cid);
-    Preconditions.checkNotNull(cT);
-    properties.setProperty(NODE_TYPE, type.name());
-    properties.setProperty(CLUSTER_ID, cid);
-    properties.setProperty(CREATION_TIME, String.valueOf(cT));
-  }
-
-  public StorageInfo(NodeType type, File propertiesFile)
-      throws IOException {
-    this.properties = readFrom(propertiesFile);
-    verifyNodeType(type);
-    verifyClusterId();
-    verifyCreationTime();
-  }
-
-  public NodeType getNodeType() {
-    return NodeType.valueOf(properties.getProperty(NODE_TYPE));
-  }
-
-  public String getClusterID() {
-    return properties.getProperty(CLUSTER_ID);
-  }
-
-  public Long getCreationTime() {
-    String creationTime = properties.getProperty(CREATION_TIME);
-    if(creationTime != null) {
-      return Long.parseLong(creationTime);
-    }
-    return null;
-  }
-
-  public String getProperty(String key) {
-    return properties.getProperty(key);
-  }
-
-  public void setProperty(String key, String value) {
-    properties.setProperty(key, value);
-  }
-
-  public void setClusterId(String clusterId) {
-    properties.setProperty(CLUSTER_ID, clusterId);
-  }
-
-  private void verifyNodeType(NodeType type)
-      throws InconsistentStorageStateException {
-    NodeType nodeType = getNodeType();
-    Preconditions.checkNotNull(nodeType);
-    if(type != nodeType) {
-      throw new InconsistentStorageStateException("Expected NodeType: " + type +
-          ", but found: " + nodeType);
-    }
-  }
-
-  private void verifyClusterId()
-      throws InconsistentStorageStateException {
-    String clusterId = getClusterID();
-    Preconditions.checkNotNull(clusterId);
-    if(clusterId.isEmpty()) {
-      throw new InconsistentStorageStateException("Cluster ID not found");
-    }
-  }
-
-  private void verifyCreationTime() {
-    Long creationTime = getCreationTime();
-    Preconditions.checkNotNull(creationTime);
-  }
-
-
-  public void writeTo(File to)
-      throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
-         FileOutputStream out = new FileOutputStream(file.getFD())) {
-      file.seek(0);
-    /*
-     * If server is interrupted before this line,
-     * the version file will remain unchanged.
-     */
-      properties.store(out, null);
-    /*
-     * Now the new fields are flushed to the head of the file, but file
-     * length can still be larger than required, and therefore the file can
-     * contain whole or corrupted fields from its old contents at the end.
-     * If the server is interrupted here and restarted later, these extra
-     * fields either should not affect server behavior or should be handled
-     * by the server correctly.
-     */
-      file.setLength(out.getChannel().position());
-    }
-  }
-
-  private Properties readFrom(File from) throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(from, "rws");
-        FileInputStream in = new FileInputStream(file.getFD())) {
-      Properties props = new Properties();
-      file.seek(0);
-      props.load(in);
-      return props;
-    }
-  }
-
-  /**
-   * Generate new clusterID.
-   *
-   * clusterID is a persistent attribute of the cluster.
-   * It is generated when the cluster is created and remains the same
-   * during the life cycle of the cluster.  When a new SCM node is initialized,
-   * if this is a new cluster, a new clusterID is generated and stored.
-   * @return new clusterID
-   */
-  public static String newClusterID() {
-    return "CID-" + UUID.randomUUID().toString();
-  }
-
-}
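
The VERSION file is a plain java.util.Properties store holding nodeType, clusterID and cTime. A sketch of the write/read round trip, with an illustrative path:

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
    import org.apache.hadoop.ozone.common.StorageInfo;

    public class VersionFileSketch {
      static void roundTrip() throws IOException {
        StorageInfo info = new StorageInfo(
            NodeType.SCM, StorageInfo.newClusterID(), System.currentTimeMillis());
        File version = new File("/tmp/VERSION");  // illustrative location
        info.writeTo(version);

        // Re-reading verifies node type, cluster id and creation time.
        StorageInfo reread = new StorageInfo(NodeType.SCM, version);
        assert reread.getClusterID().startsWith("CID-");
      }
    }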

+ 0 - 18
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java

@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.common;

+ 0 - 42
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java

@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.common.statemachine;
-
-/**
- * Class wraps invalid state transition exception.
- */
-public class InvalidStateTransitionException extends Exception {
-  private Enum<?> currentState;
-  private Enum<?> event;
-
-  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
-    super("Invalid event: " + event + " at " + currentState + " state.");
-    this.currentState = currentState;
-    this.event = event;
-  }
-
-  public Enum<?> getCurrentState() {
-    return currentState;
-  }
-
-  public Enum<?> getEvent() {
-    return event;
-  }
-
-}

+ 0 - 68
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java

@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.common.statemachine;
-
-import com.google.common.base.Supplier;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Template class that wraps simple event driven state machine.
- * @param <STATE> states allowed
- * @param <EVENT> events allowed
- */
-public class StateMachine<STATE extends Enum<?>, EVENT extends Enum<?>> {
-  private STATE initialState;
-  private Set<STATE> finalStates;
-
-  private final LoadingCache<EVENT, Map<STATE, STATE>> transitions =
-      CacheBuilder.newBuilder().build(
-          CacheLoader.from((Supplier<Map<STATE, STATE>>) () -> new HashMap<>()));
-
-  public StateMachine(STATE initState, Set<STATE> finalStates) {
-    this.initialState = initState;
-    this.finalStates = finalStates;
-  }
-
-  public STATE getInitialState() {
-    return initialState;
-  }
-
-  public Set<STATE> getFinalStates() {
-    return finalStates;
-  }
-
-  public STATE getNextState(STATE from, EVENT e)
-      throws InvalidStateTransitionException {
-    STATE target = transitions.getUnchecked(e).get(from);
-    if (target == null) {
-      throw new InvalidStateTransitionException(from, e);
-    }
-    return target;
-  }
-
-  public void addTransition(STATE from, STATE to, EVENT e) {
-    transitions.getUnchecked(e).put(from, to);
-  }
-}
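
A compact sketch of wiring the template above; the states and events are invented for illustration:

    import java.util.EnumSet;

    import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
    import org.apache.hadoop.ozone.common.statemachine.StateMachine;

    public class StateMachineSketch {
      enum State { OPEN, CLOSED }
      enum Event { CLOSE }

      static void demo() throws InvalidStateTransitionException {
        StateMachine<State, Event> sm =
            new StateMachine<>(State.OPEN, EnumSet.of(State.CLOSED));
        sm.addTransition(State.OPEN, State.CLOSED, Event.CLOSE);

        State next = sm.getNextState(State.OPEN, Event.CLOSE); // -> CLOSED
        // A (state, event) pair with no registered transition throws
        // InvalidStateTransitionException, e.g. getNextState(CLOSED, CLOSE).
      }
    }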

+ 0 - 21
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java

@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common.statemachine;
-/**
- * State machine template class for Ozone.
- */

+ 0 - 255
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java

@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.ArrayList;
-
-/**
- * Helper class that represents block data and converts it to and from protobuf.
- */
-public class BlockData {
-  private final BlockID blockID;
-  private final Map<String, String> metadata;
-
-  /**
-   * Represent a list of chunks.
-   * In order to reduce memory usage, chunkList is declared as an
-   * {@link Object}.
-   * When #elements == 0, chunkList is null.
-   * When #elements == 1, chunkList refers to the only element.
-   * When #elements > 1, chunkList refers to the list.
-   *
-   * Please note: when we are working with blocks, we don't care what they
-   * point to, so we don't read or validate chunk info. That is the
-   * responsibility of a higher layer like Ozone. We just read and write
-   * data from the network.
-   */
-  private Object chunkList;
-
-  /**
-   * total size of the key.
-   */
-  private long size;
-
-  /**
-   * Constructs a BlockData Object.
-   *
-   * @param blockID
-   */
-  public BlockData(BlockID blockID) {
-    this.blockID = blockID;
-    this.metadata = new TreeMap<>();
-    this.size = 0;
-  }
-
-  /**
-   * Returns a blockData object from the protobuf data.
-   *
-   * @param data - Protobuf data.
-   * @return - BlockData
-   * @throws IOException
-   */
-  public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws
-      IOException {
-    BlockData blockData = new BlockData(
-        BlockID.getFromProtobuf(data.getBlockID()));
-    for (int x = 0; x < data.getMetadataCount(); x++) {
-      blockData.addMetadata(data.getMetadata(x).getKey(),
-          data.getMetadata(x).getValue());
-    }
-    blockData.setChunks(data.getChunksList());
-    if (data.hasSize()) {
-      Preconditions.checkArgument(data.getSize() == blockData.getSize());
-    }
-    return blockData;
-  }
-
-  /**
-   * Returns a Protobuf message from BlockData.
-   * @return Proto Buf Message.
-   */
-  public ContainerProtos.BlockData getProtoBufMessage() {
-    ContainerProtos.BlockData.Builder builder =
-        ContainerProtos.BlockData.newBuilder();
-    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-    builder.addAllChunks(getChunks());
-    builder.setSize(size);
-    return builder.build();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key
-   * @param value - Value
-   * @throws IOException
-   */
-  public synchronized void addMetadata(String key, String value) throws
-      IOException {
-    if (this.metadata.containsKey(key)) {
-      throw new IOException("This key already exists. Key " + key);
-    }
-    metadata.put(key, value);
-  }
-
-  public synchronized Map<String, String> getMetadata() {
-    return Collections.unmodifiableMap(this.metadata);
-  }
-
-  /**
-   * Returns value of a key.
-   */
-  public synchronized String getValue(String key) {
-    return metadata.get(key);
-  }
-
-  /**
-   * Deletes a metadata entry from the map.
-   *
-   * @param key - Key
-   */
-  public synchronized void deleteKey(String key) {
-    metadata.remove(key);
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<ContainerProtos.ChunkInfo> castChunkList() {
-    return (List<ContainerProtos.ChunkInfo>)chunkList;
-  }
-
-  /**
-   * Returns chunks list.
-   *
-   * @return list of chunkinfo.
-   */
-  public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunkList == null? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo?
-            Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
-        : Collections.unmodifiableList(castChunkList());
-  }
-
-  /**
-   * Adds chunkInfo to the list.
-   */
-  public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    if (chunkList == null) {
-      chunkList = chunkInfo;
-    } else {
-      final List<ContainerProtos.ChunkInfo> list;
-      if (chunkList instanceof ContainerProtos.ChunkInfo) {
-        list = new ArrayList<>(2);
-        list.add((ContainerProtos.ChunkInfo)chunkList);
-        chunkList = list;
-      } else {
-        list = castChunkList();
-      }
-      list.add(chunkInfo);
-    }
-    size += chunkInfo.getLen();
-  }
-
-  /**
-   * removes the chunk.
-   */
-  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    final boolean removed;
-    if (chunkList instanceof List) {
-      final List<ContainerProtos.ChunkInfo> list = castChunkList();
-      removed = list.remove(chunkInfo);
-      if (list.size() == 1) {
-        chunkList = list.get(0);
-      }
-    } else if (chunkInfo.equals(chunkList)) {
-      chunkList = null;
-      removed = true;
-    } else {
-      removed = false;
-    }
-
-    if (removed) {
-      size -= chunkInfo.getLen();
-    }
-    return removed;
-  }
-
-  /**
-   * Returns container ID.
-   *
-   * @return long.
-   */
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  /**
-   * Returns LocalID.
-   * @return long.
-   */
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  /**
-   * Return Block ID.
-   * @return BlockID.
-   */
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  /**
-   * Sets Chunk list.
-   *
-   * @param chunks - List of chunks.
-   */
-  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
-    if (chunks == null) {
-      chunkList = null;
-      size = 0L;
-    } else {
-      final int n = chunks.size();
-      chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
-      size = chunks.parallelStream().mapToLong(
-          ContainerProtos.ChunkInfo::getLen).sum();
-    }
-  }
-
-  /**
-   * Get the total size of chunks allocated for the key.
-   * @return total size of the key.
-   */
-  public long getSize() {
-    return size;
-  }
-}
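
The 0/1/n chunkList representation above is invisible to callers. A sketch, with the chunk name and length purely illustrative:

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;

    public class BlockDataSketch {
      static void demo() {
        BlockData block = new BlockData(new BlockID(1L, 100L));

        ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
            .setChunkName("block_100_chunk_1")
            .setOffset(0)
            .setLen(4096)
            .build();

        block.addChunk(chunk);           // stored as a single element, no list
        assert block.getSize() == 4096;  // size tracks summed chunk lengths
        assert block.getChunks().size() == 1;
      }
    }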

+ 0 - 184
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java

@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * Java class that represents the ChunkInfo protobuf message. This helper
- * class allows us to convert between the protobuf and plain Java forms.
- */
-public class ChunkInfo {
-  private final String chunkName;
-  private final long offset;
-  private final long len;
-  private String checksum;
-  private final Map<String, String> metadata;
-
-
-  /**
-   * Constructs a ChunkInfo.
-   *
-   * @param chunkName - File Name where chunk lives.
-   * @param offset    - offset where Chunk Starts.
-   * @param len       - Length of the Chunk.
-   */
-  public ChunkInfo(String chunkName, long offset, long len) {
-    this.chunkName = chunkName;
-    this.offset = offset;
-    this.len = len;
-    this.metadata = new TreeMap<>();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key Name.
-   * @param value - Value.
-   * @throws IOException
-   */
-  public void addMetadata(String key, String value) throws IOException {
-    synchronized (this.metadata) {
-      if (this.metadata.containsKey(key)) {
-        throw new IOException("This key already exists. Key " + key);
-      }
-      metadata.put(key, value);
-    }
-  }
-
-  /**
-   * Gets a Chunkinfo class from the protobuf definitions.
-   *
-   * @param info - Protobuf class
-   * @return ChunkInfo
-   * @throws IOException
-   */
-  public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info)
-      throws IOException {
-    Preconditions.checkNotNull(info);
-
-    ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(),
-        info.getLen());
-
-    for (int x = 0; x < info.getMetadataCount(); x++) {
-      chunkInfo.addMetadata(info.getMetadata(x).getKey(),
-          info.getMetadata(x).getValue());
-    }
-
-
-    if (info.hasChecksum()) {
-      chunkInfo.setChecksum(info.getChecksum());
-    }
-    return chunkInfo;
-  }
-
-  /**
-   * Returns a ProtoBuf Message from ChunkInfo.
-   *
-   * @return Protocol Buffer Message
-   */
-  public ContainerProtos.ChunkInfo getProtoBufMessage() {
-    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos
-        .ChunkInfo.newBuilder();
-
-    builder.setChunkName(this.getChunkName());
-    builder.setOffset(this.getOffset());
-    builder.setLen(this.getLen());
-    if (this.getChecksum() != null && !this.getChecksum().isEmpty()) {
-      builder.setChecksum(this.getChecksum());
-    }
-
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-
-    return builder.build();
-  }
-
-  /**
-   * Returns the chunkName.
-   *
-   * @return - String
-   */
-  public String getChunkName() {
-    return chunkName;
-  }
-
-  /**
-   * Gets the start offset of the given chunk in physical file.
-   *
-   * @return - long
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-  /**
-   * Returns the length of the Chunk.
-   *
-   * @return long
-   */
-  public long getLen() {
-    return len;
-  }
-
-  /**
-   * Returns the SHA256 value of this chunk.
-   *
-   * @return - Hash String
-   */
-  public String getChecksum() {
-    return checksum;
-  }
-
-  /**
-   * Sets the Hash value of this chunk.
-   *
-   * @param checksum - Hash String.
-   */
-  public void setChecksum(String checksum) {
-    this.checksum = checksum;
-  }
-
-  /**
-   * Returns Metadata associated with this Chunk.
-   *
-   * @return - Map of Key,values.
-   */
-  public Map<String, String> getMetadata() {
-    return metadata;
-  }
-
-  @Override
-  public String toString() {
-    return "ChunkInfo{" +
-        "chunkName='" + chunkName +
-        ", offset=" + offset +
-        ", len=" + len +
-        '}';
-  }
-}
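
And the matching round trip for the helper above; names and sizes are illustrative, and addMetadata throws IOException on duplicate keys:

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

    public class ChunkInfoSketch {
      static void roundTrip() throws IOException {
        ChunkInfo info = new ChunkInfo("block_100_chunk_1", 0, 4096);
        info.addMetadata("createdBy", "sketch");

        ContainerProtos.ChunkInfo proto = info.getProtoBufMessage();
        ChunkInfo back = ChunkInfo.getFromProtoBuf(proto); // metadata preserved
      }
    }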

+ 0 - 23
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-/**
- * Helper classes for the container protocol communication.
- */

+ 0 - 189
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java

@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.apache.hadoop.util.Time;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.Callable;
-
-/**
- * This class represents the lease created on a resource. Callback can be
- * registered on the lease which will be executed in case of timeout.
- *
- * @param <T> Resource type for which the lease can be associated
- */
-public class Lease<T> {
-
-  /**
-   * The resource for which this lease is created.
-   */
-  private final T resource;
-
-  private final long creationTime;
-
-  /**
-   * Lease lifetime in milliseconds.
-   */
-  private volatile long leaseTimeout;
-
-  private boolean expired;
-
-  /**
-   * Functions to be called in case of timeout.
-   */
-  private List<Callable<Void>> callbacks;
-
-
-  /**
-   * Creates a lease on the specified resource with given timeout.
-   *
-   * @param resource
-   *        Resource for which the lease has to be created
-   * @param timeout
-   *        Lease lifetime in milliseconds
-   */
-  public Lease(T resource, long timeout) {
-    this.resource = resource;
-    this.leaseTimeout = timeout;
-    this.callbacks = Collections.synchronizedList(new ArrayList<>());
-    this.creationTime = Time.monotonicNow();
-    this.expired = false;
-  }
-
-  /**
-   * Returns true if the lease has expired, else false.
-   *
-   * @return true if expired, else false
-   */
-  public boolean hasExpired() {
-    return expired;
-  }
-
-  /**
-   * Registers a callback which will be executed in case of timeout. Callbacks
-   * are executed in a separate Thread.
-   *
-   * @param callback
-   *        The Callable which has to be executed
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public void registerCallBack(Callable<Void> callback)
-      throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    callbacks.add(callback);
-  }
-
-  /**
-   * Returns the time elapsed since the creation of lease.
-   *
-   * @return elapsed time in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getElapsedTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return Time.monotonicNow() - creationTime;
-  }
-
-  /**
-   * Returns the time available before timeout.
-   *
-   * @return remaining time in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getRemainingTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return leaseTimeout - getElapsedTime();
-  }
-
-  /**
-   * Returns total lease lifetime.
-   *
-   * @return total lifetime of lease in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public long getLeaseLifeTime() throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    return leaseTimeout;
-  }
-
-  /**
-   * Renews the lease timeout period.
-   *
-   * @param timeout
-   *        Time to be added to the lease in milliseconds
-   * @throws LeaseExpiredException
-   *         If the lease has already timed out
-   */
-  public void renew(long timeout) throws LeaseExpiredException {
-    if(hasExpired()) {
-      throw new LeaseExpiredException("Resource: " + resource);
-    }
-    leaseTimeout += timeout;
-  }
-
-  @Override
-  public int hashCode() {
-    return resource.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if(obj instanceof Lease) {
-      return resource.equals(((Lease) obj).resource);
-    }
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return "Lease<" + resource.toString() + ">";
-  }
-
-  /**
-   * Returns the callbacks to be executed for the lease in case of timeout.
-   *
-   * @return callbacks to be executed
-   */
-  List<Callable<Void>> getCallbacks() {
-    return callbacks;
-  }
-
-  /**
-   * Expires/Invalidates the lease.
-   */
-  void invalidate() {
-    callbacks = null;
-    expired = true;
-  }
-
-}
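
For reference, a minimal sketch of the removed Lease API (illustrative only, not part of the patch; the class name LeaseSketch is hypothetical). In practice a Lease is obtained from LeaseManager.acquire() rather than constructed directly, and timeout callbacks only fire when the manager's monitor expires the lease:

    import java.util.concurrent.Callable;

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseExpiredException;

    public class LeaseSketch {
      public static void main(String[] args) throws LeaseExpiredException {
        // A 10-second lease on a plain String resource.
        Lease<String> lease = new Lease<>("container-42", 10_000L);

        // Registered callbacks run on a separate thread after expiry.
        Callable<Void> onTimeout = () -> {
          System.out.println("lease on container-42 expired");
          return null;
        };
        lease.registerCallBack(onTimeout);

        System.out.println("remaining ms: " + lease.getRemainingTime());
        lease.renew(5_000L); // throws LeaseExpiredException once expired
      }
    }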

+ 0 - 46
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java

@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception indicates that a lease has already been acquired on the
- * same resource.
- */
-public class LeaseAlreadyExistException extends LeaseException {
-
-  /**
-   * Constructs a {@code LeaseAlreadyExistException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseAlreadyExistException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code LeaseAlreadyExistException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseAlreadyExistException(String message) {
-    super(message);
-  }
-
-}

+ 0 - 65
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.Callable;
-
-/**
- * This class is responsible for executing the callbacks of a lease in case of
- * timeout.
- */
-public class LeaseCallbackExecutor<T> implements Runnable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LeaseCallbackExecutor.class);
-
-  private final T resource;
-  private final List<Callable<Void>> callbacks;
-
-  /**
-   * Constructs LeaseCallbackExecutor instance with list of callbacks.
-   *
-   * @param resource
-   *        The resource for which the callbacks are executed
-   * @param callbacks
-   *        Callbacks to be executed by this executor
-   */
-  public LeaseCallbackExecutor(T resource, List<Callable<Void>> callbacks) {
-    this.resource = resource;
-    this.callbacks = callbacks;
-  }
-
-  @Override
-  public void run() {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Executing callbacks for lease on {}", resource);
-    }
-    for(Callable<Void> callback : callbacks) {
-      try {
-        callback.call();
-      } catch (Exception e) {
-        LOG.warn("Exception while executing callback for lease on {}",
-            resource, e);
-      }
-    }
-  }
-
-}
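
For reference, a minimal dispatch sketch for the removed LeaseCallbackExecutor (illustrative only, not part of the patch; the class name CallbackDispatch is hypothetical). This mirrors how LeaseManager hands the callbacks of an expired lease to a thread pool:

    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import org.apache.hadoop.ozone.lease.LeaseCallbackExecutor;

    public class CallbackDispatch {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newCachedThreadPool();
        List<Callable<Void>> callbacks = Collections.singletonList(() -> {
          System.out.println("timeout handler ran");
          return null;
        });
        // All callbacks of one expired lease are wrapped in one Runnable;
        // exceptions from individual callbacks are logged, not rethrown.
        pool.execute(new LeaseCallbackExecutor<>("container-42", callbacks));
        pool.shutdown();
      }
    }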

+ 0 - 45
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java

@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception represents all lease related exceptions.
- */
-public class LeaseException extends Exception {
-
-  /**
-   * Constructs a {@code LeaseException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code LeaseException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseException(String message) {
-    super(message);
-  }
-
-}

+ 0 - 45
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java

@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception indicates that the lease being accessed has expired.
- */
-public class LeaseExpiredException extends LeaseException {
-
-  /**
-   * Constructs a {@code LeaseExpiredException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseExpiredException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code LeaseExpiredException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseExpiredException(String message) {
-    super(message);
-  }
-
-}

+ 0 - 251
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java

@@ -1,251 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-/**
- * LeaseManager creates leases on resources and tracks them until they
- * expire or are released. A lease may be returned to the LeaseManager
- * before it expires; the LeaseManager owns the full lifecycle of every
- * lease it hands out. The resource for which a lease is created should
- * have a proper {@code equals} implementation, since resource equality
- * is checked when the lease is created.
- *
- * @param <T> Type of leases that this lease manager can create
- */
-public class LeaseManager<T> {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LeaseManager.class);
-
-  private final String name;
-  private final long defaultTimeout;
-  private Map<T, Lease<T>> activeLeases;
-  private LeaseMonitor leaseMonitor;
-  private Thread leaseMonitorThread;
-  private boolean isRunning;
-
-  /**
-   * Creates an instance of lease manager.
-   *
-   * @param name
-   *        Name for the LeaseManager instance.
-   * @param defaultTimeout
-   *        Default timeout in milliseconds to be used for lease creation.
-   */
-  public LeaseManager(String name, long defaultTimeout) {
-    this.name = name;
-    this.defaultTimeout = defaultTimeout;
-  }
-
-  /**
-   * Starts the lease manager service.
-   */
-  public void start() {
-    LOG.debug("Starting {} LeaseManager service", name);
-    activeLeases = new ConcurrentHashMap<>();
-    leaseMonitor = new LeaseMonitor();
-    leaseMonitorThread = new Thread(leaseMonitor);
-    leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
-    leaseMonitorThread.setDaemon(true);
-    leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
-      // Log the fatal error. Note that a terminated Thread cannot be
-      // start()-ed again (it would throw IllegalThreadStateException),
-      // so lease expiry is no longer handled once this thread dies.
-      LOG.error("LeaseMonitor thread encountered an error. Thread: {}",
-          thread.toString(), throwable);
-    });
-    LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name);
-    leaseMonitorThread.start();
-    isRunning = true;
-  }
-
-  /**
-   * Returns a lease for the specified resource with default timeout.
-   *
-   * @param resource
-   *        Resource for which lease has to be created
-   * @throws LeaseAlreadyExistException
-   *         If there is already a lease on the resource
-   */
-  public synchronized Lease<T> acquire(T resource)
-      throws LeaseAlreadyExistException {
-    return acquire(resource, defaultTimeout);
-  }
-
-  /**
-   * Returns a lease for the specified resource with the timeout provided.
-   *
-   * @param resource
-   *        Resource for which lease has to be created
-   * @param timeout
-   *        The timeout in milliseconds which has to be set on the lease
-   * @throws LeaseAlreadyExistException
-   *         If there is already a lease on the resource
-   */
-  public synchronized Lease<T> acquire(T resource, long timeout)
-      throws LeaseAlreadyExistException {
-    checkStatus();
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout);
-    }
-    if(activeLeases.containsKey(resource)) {
-      throw new LeaseAlreadyExistException("Resource: " + resource);
-    }
-    Lease<T> lease = new Lease<>(resource, timeout);
-    activeLeases.put(resource, lease);
-    leaseMonitorThread.interrupt();
-    return lease;
-  }
-
-  /**
-   * Returns a lease associated with the specified resource.
-   *
-   * @param resource
-   *        Resource for which the lease has to be returned
-   * @throws LeaseNotFoundException
-   *         If there is no active lease on the resource
-   */
-  public Lease<T> get(T resource) throws LeaseNotFoundException {
-    checkStatus();
-    Lease<T> lease = activeLeases.get(resource);
-    if(lease != null) {
-      return lease;
-    }
-    throw new LeaseNotFoundException("Resource: " + resource);
-  }
-
-  /**
-   * Releases the lease associated with the specified resource.
-   *
-   * @param resource
-   *        The resource for which the lease has to be released
-   * @throws LeaseNotFoundException
-   *         If there is no active lease on the resource
-   */
-  public synchronized void release(T resource)
-      throws LeaseNotFoundException {
-    checkStatus();
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Releasing lease on {}", resource);
-    }
-    Lease<T> lease = activeLeases.remove(resource);
-    if(lease == null) {
-      throw new LeaseNotFoundException("Resource: " + resource);
-    }
-    lease.invalidate();
-  }
-
-  /**
-   * Shuts down the LeaseManager and releases the resources. All the active
-   * {@link Lease} will be released (callbacks on leases will not be
-   * executed).
-   */
-  public void shutdown() {
-    checkStatus();
-    LOG.debug("Shutting down LeaseManager service");
-    leaseMonitor.disable();
-    leaseMonitorThread.interrupt();
-    for(T resource : activeLeases.keySet()) {
-      try {
-        release(resource);
-      }  catch(LeaseNotFoundException ex) {
-        //Ignore the exception, someone might have released the lease
-      }
-    }
-    isRunning = false;
-  }
-
-  /**
-   * Throws {@link LeaseManagerNotRunningException} if the service is not
-   * running.
-   */
-  private void checkStatus() {
-    if(!isRunning) {
-      throw new LeaseManagerNotRunningException("LeaseManager not running.");
-    }
-  }
-
-  /**
-   * Monitors the leases and expires them based on the timeout, also
-   * responsible for executing the callbacks of expired leases.
-   */
-  private final class LeaseMonitor implements Runnable {
-
-    private boolean monitor = true;
-    private ExecutorService executorService;
-
-    private LeaseMonitor() {
-      this.monitor = true;
-      this.executorService = Executors.newCachedThreadPool();
-    }
-
-    @Override
-    public void run() {
-      while(monitor) {
-        LOG.debug("{}-LeaseMonitor: checking for lease expiry", name);
-        long sleepTime = Long.MAX_VALUE;
-
-        for (T resource : activeLeases.keySet()) {
-          try {
-            Lease<T> lease = get(resource);
-            long remainingTime = lease.getRemainingTime();
-            if (remainingTime <= 0) {
-              //Lease has timed out
-              List<Callable<Void>> leaseCallbacks = lease.getCallbacks();
-              release(resource);
-              executorService.execute(
-                  new LeaseCallbackExecutor(resource, leaseCallbacks));
-            } else {
-              sleepTime = remainingTime > sleepTime ?
-                  sleepTime : remainingTime;
-            }
-          } catch (LeaseNotFoundException | LeaseExpiredException ex) {
-            //Ignore the exception, someone might have released the lease
-          }
-        }
-
-        try {
-          if(!Thread.interrupted()) {
-            Thread.sleep(sleepTime);
-          }
-        } catch (InterruptedException ignored) {
-          // This means a new lease is added to activeLeases.
-        }
-      }
-    }
-
-    /**
-     * Disables lease monitor, next interrupt call on the thread
-     * will stop lease monitor.
-     */
-    public void disable() {
-      monitor = false;
-    }
-  }
-
-}
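
For reference, a minimal end-to-end sketch of the removed LeaseManager service (illustrative only, not part of the patch; the class name LeaseManagerSketch is hypothetical):

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    public class LeaseManagerSketch {
      public static void main(String[] args) throws Exception {
        // Default timeout of 5 seconds for every lease handed out.
        LeaseManager<String> manager = new LeaseManager<>("demo", 5_000L);
        manager.start(); // spawns the daemon LeaseMonitor thread

        Lease<String> lease = manager.acquire("container-42");
        lease.registerCallBack(() -> {
          System.out.println("container-42 lease timed out");
          return null;
        });

        // Releasing early invalidates the lease, so the callback never fires.
        manager.release("container-42");
        manager.shutdown();
      }
    }

Acquiring a second lease on an equal resource throws LeaseAlreadyExistException, which is why the class javadoc insists on a proper equals implementation for the resource type.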

+ 0 - 45
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java

@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lease;
-
-/**
- * This exception indicates that the LeaseManager service is not running.
- */
-public class LeaseManagerNotRunningException extends RuntimeException {
-
-  /**
-   * Constructs a {@code LeaseManagerNotRunningException} with {@code null}
-   * as its error detail message.
-   */
-  public LeaseManagerNotRunningException() {
-    super();
-  }
-
-  /**
-   * Constructs a {@code LeaseManagerNotRunningException} with the specified
-   * detail message.
-   *
-   * @param message
-   *        The detail message (which is saved for later retrieval
-   *        by the {@link #getMessage()} method)
-   */
-  public LeaseManagerNotRunningException(String message) {
-    super(message);
-  }
-
-}

Some files were not shown because too many files changed in this diff