
HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils
Contributed by Nandakumar.

Anu Engineer 6 years ago
parent
commit
c5629d546d
80 files changed, 226 insertions and 282 deletions
  1. + 2 - 4   hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
  2. + 2 - 4   hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
  3. + 1 - 1   hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
  4. + 4 - 3   hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
  5. + 2 - 2   hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
  6. + 3 - 2   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
  7. + 0 - 2   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
  8. + 2 - 2   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
  9. + 6 - 6   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 10. + 4 - 3   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
 11. + 5 - 6   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
 12. + 4 - 2   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
 13. + 12 - 9  hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
 14. + 3 - 3   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 15. + 0 - 5   hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
 16. + 5 - 3   hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
 17. + 4 - 2   hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
 18. + 0 - 1   hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
 19. + 2 - 1   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 20. + 9 - 13  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
 21. + 1 - 1   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
 22. + 11 - 8  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
 23. + 3 - 3   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 24. + 2 - 2   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
 25. + 8 - 8   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java
 26. + 4 - 2   hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
 27. + 0 - 7   hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
 28. + 0 - 2   hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
 29. + 2 - 3   hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
 30. + 2 - 1   hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
 31. + 4 - 2   hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
 32. + 0 - 3   hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
 33. + 9 - 9   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 34. + 2 - 1   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
 35. + 2 - 2   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
 36. + 2 - 2   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 37. + 2 - 2   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 38. + 4 - 3   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 39. + 1 - 1   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
 40. + 1 - 1   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
 41. + 2 - 2   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
 42. + 0 - 2   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
 43. + 2 - 1   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
 44. + 22 - 28 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java
 45. + 3 - 3   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
 46. + 4 - 3   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
 47. + 2 - 1   hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 48. + 4 - 4   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
 49. + 0 - 1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
 50. + 4 - 3   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
 51. + 10 - 12 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
 52. + 0 - 1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
 53. + 4 - 3   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
 54. + 1 - 1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
 55. + 4 - 4   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
 56. + 2 - 2   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
 57. + 4 - 7   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
 58. + 2 - 2   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
 59. + 2 - 3   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
 60. + 2 - 1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
 61. + 1 - 2   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
 62. + 5 - 4   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
 63. + 4 - 2   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
 64. + 3 - 4   hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
 65. + 0 - 4   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
 66. + 3 - 5   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
 67. + 0 - 2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
 68. + 1 - 2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 69. + 0 - 2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 70. + 0 - 1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
 71. + 1 - 2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
 72. + 1 - 3   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
 73. + 0 - 2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
 74. + 0 - 1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
 75. + 1 - 4   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
 76. + 1 - 1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
 77. + 1 - 1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
 78. + 0 - 13  hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 79. + 0 - 2   hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
 80. + 0 - 4   hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java

+ 2 - 4
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java

@@ -38,7 +38,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Semaphore;
@@ -188,13 +187,12 @@ public class XceiverClient extends XceiverClientSpi {
   /**
    * Create a pipeline.
    *
-   * @param pipeline -  pipeline to be created.
+   * @param ignored -  pipeline to be created.
    */
   @Override
-  public void createPipeline(Pipeline pipeline)
+  public void createPipeline(Pipeline ignored)
       throws IOException {
     // For stand alone pipeline, there is no notion called setup pipeline.
-    return;
   }
 
   /**

+ 2 - 4
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java

@@ -38,7 +38,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Semaphore;
@@ -218,13 +217,12 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   /**
    * Create a pipeline.
    *
-   * @param pipeline -  pipeline to be created.
+   * @param ignored -  pipeline to be created.
    */
   @Override
-  public void createPipeline(Pipeline pipeline)
+  public void createPipeline(Pipeline ignored)
       throws IOException {
     // For stand alone pipeline, there is no notion called setup pipeline.
-    return;
   }
 
   /**

+ 1 - 1
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java

@@ -154,7 +154,7 @@ public class XceiverClientManager implements Closeable {
               break;
             case CHAINED:
             default:
-              throw new IOException ("not implemented" + pipeline.getType());
+              throw new IOException("not implemented" + pipeline.getType());
             }
             client.connect();
             return client;

+ 4 - 3
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java

@@ -65,7 +65,8 @@ public class ChunkInputStream extends InputStream implements Seekable {
    * @param chunks list of chunks to read
    * @param traceID container protocol call traceID
    */
-  public ChunkInputStream(BlockID blockID, XceiverClientManager xceiverClientManager,
+  public ChunkInputStream(
+      BlockID blockID, XceiverClientManager xceiverClientManager,
       XceiverClientSpi xceiverClient, List<ChunkInfo> chunks, String traceID) {
     this.blockID = blockID;
     this.traceID = traceID;
@@ -211,8 +212,8 @@ public class ChunkInputStream extends InputStream implements Seekable {
     if (pos < 0 || (chunks.size() == 0 && pos > 0)
         || pos >= chunkOffset[chunks.size() - 1] + chunks.get(chunks.size() - 1)
         .getLen()) {
-      throw new EOFException(
-          "EOF encountered pos: " + pos + " container key: " + blockID.getLocalID());
+      throw new EOFException("EOF encountered pos: " + pos + " container key: "
+          + blockID.getLocalID());
     }
     if (chunkIndex == -1) {
       chunkIndex = Arrays.binarySearch(chunkOffset, pos);

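Note: the hunk above ends before the handling of a negative binarySearch result, which this diff does not show. As a hedged, self-contained sketch of how such an offset lookup typically decodes the insertion point (ChunkLocator and its values are illustrative, not the real ChunkInputStream code):

import java.util.Arrays;

final class ChunkLocator {
  // Arrays.binarySearch over per-chunk start offsets either hits a chunk
  // boundary exactly, or returns -(insertionPoint) - 1; in the latter case
  // the containing chunk is the one just before the insertion point.
  static int locate(long[] chunkOffset, long pos) {
    int i = Arrays.binarySearch(chunkOffset, pos);
    return i >= 0 ? i : -i - 2;   // -(ip) - 1  decodes to  ip - 1
  }

  public static void main(String[] args) {
    long[] offsets = {0, 100, 250};            // chunks start here
    System.out.println(locate(offsets, 100));  // 1 (exact boundary)
    System.out.println(locate(offsets, 175));  // 1 (inside second chunk)
  }
}
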
+ 2 - 2
hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java

@@ -76,8 +76,8 @@ public class ChunkOutputStream extends OutputStream {
    * @param chunkSize chunk size
    */
   public ChunkOutputStream(BlockID blockID, String key,
-       XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
-       String traceID, int chunkSize) {
+       XceiverClientManager xceiverClientManager,
+       XceiverClientSpi xceiverClient, String traceID, int chunkSize) {
     this.blockID = blockID;
     this.key = key;
     this.traceID = traceID;

+ 3 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import java.util.Objects;
 
 /**
- * BlockID of ozone (containerID  localID)
+ * BlockID of ozone (containerID  localID).
  */
 public class BlockID {
   private long containerID;
@@ -65,7 +65,8 @@ public class BlockID {
         setContainerID(containerID).setLocalID(localID).build();
   }
 
-  public static BlockID getFromProtobuf(ContainerProtos.DatanodeBlockID blockID) {
+  public static BlockID getFromProtobuf(
+      ContainerProtos.DatanodeBlockID blockID) {
     return new BlockID(blockID.getContainerID(),
         blockID.getLocalID());
   }

+ 0 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -29,7 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;

+ 2 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java

@@ -43,8 +43,8 @@ public final class AllocatedBlock {
       return this;
     }
 
-    public Builder setBlockID(BlockID blockID) {
-      this.blockID = blockID;
+    public Builder setBlockID(BlockID blockId) {
+      this.blockID = blockId;
       return this;
     }
 

+ 6 - 6
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java

@@ -396,13 +396,13 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
     private ReplicationType replicationType;
 
     public Builder setReplicationType(
-        ReplicationType replicationType) {
-      this.replicationType = replicationType;
+        ReplicationType repType) {
+      this.replicationType = repType;
       return this;
     }
 
-    public Builder setPipelineID(PipelineID pipelineID) {
-      this.pipelineID = pipelineID;
+    public Builder setPipelineID(PipelineID pipelineId) {
+      this.pipelineID = pipelineId;
       return this;
     }
 
@@ -447,8 +447,8 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
       return this;
     }
 
-    public Builder setDeleteTransactionId(long deleteTransactionId) {
-      this.deleteTransactionId = deleteTransactionId;
+    public Builder setDeleteTransactionId(long deleteTransactionID) {
+      this.deleteTransactionId = deleteTransactionID;
       return this;
     }
 

+ 4 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java

@@ -26,8 +26,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 /**
  * Class wraps ozone container info.
  */
-public class ContainerWithPipeline
-    implements Comparator<ContainerWithPipeline>, Comparable<ContainerWithPipeline> {
+public class ContainerWithPipeline implements Comparator<ContainerWithPipeline>,
+    Comparable<ContainerWithPipeline> {
 
   private final ContainerInfo containerInfo;
   private final Pipeline pipeline;
@@ -45,7 +45,8 @@ public class ContainerWithPipeline
     return pipeline;
   }
 
-  public static ContainerWithPipeline fromProtobuf(HddsProtos.ContainerWithPipeline allocatedContainer) {
+  public static ContainerWithPipeline fromProtobuf(
+      HddsProtos.ContainerWithPipeline allocatedContainer) {
     return new ContainerWithPipeline(
         ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
         Pipeline.getFromProtoBuf(allocatedContainer.getPipeline()));

+ 5 - 6
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java

@@ -169,8 +169,8 @@ public class Pipeline {
    */
   public List<String> getDatanodeHosts() {
     List<String> dataHosts = new ArrayList<>();
-    for (DatanodeDetails id :getDatanodes().values()) {
-      dataHosts.add(id.getHostName());
+    for (DatanodeDetails datanode : getDatanodes().values()) {
+      dataHosts.add(datanode.getHostName());
     }
     return dataHosts;
   }
@@ -219,7 +219,7 @@ public class Pipeline {
    * Update the State of the pipeline.
    */
   public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
-     lifeCycleState = nextState;
+    lifeCycleState = nextState;
   }
 
   /**
@@ -244,9 +244,8 @@ public class Pipeline {
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
         .append("[");
-    getDatanodes().keySet().stream()
-        .forEach(id -> b.
-            append(id.endsWith(getLeaderID()) ? "*" + id : id));
+    getDatanodes().keySet().forEach(
+        node -> b.append(node.endsWith(getLeaderID()) ? "*" + id : id));
     b.append(" id:").append(id);
     if (getType() != null) {
       b.append(" type:").append(getType().toString());

+ 4 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java

@@ -38,7 +38,8 @@ public interface StorageContainerLocationProtocol {
    * set of datanodes that should be used creating this container.
    *
    */
-  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType replicationType,
+  ContainerWithPipeline allocateContainer(
+      HddsProtos.ReplicationType replicationType,
       HddsProtos.ReplicationFactor factor, String owner)
       throws IOException;
 
@@ -61,7 +62,8 @@ public interface StorageContainerLocationProtocol {
    * @return ContainerWithPipeline - the container info with the pipeline.
    * @throws IOException
    */
-  ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException;
+  ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException;
 
   /**
    * Ask SCM a list of containers with a range of container names

+ 12 - 9
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java

@@ -97,8 +97,9 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
    * @throws IOException
    */
   @Override
-  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException {
+  public ContainerWithPipeline allocateContainer(
+      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
+      String owner) throws IOException {
 
     ContainerRequestProto request = ContainerRequestProto.newBuilder()
         .setReplicationFactor(factor)
@@ -116,7 +117,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
       throw new IOException(response.hasErrorMessage() ?
           response.getErrorMessage() : "Allocate container failed.");
     }
-    return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
+    return ContainerWithPipeline.fromProtobuf(
+        response.getContainerWithPipeline());
   }
 
   public ContainerInfo getContainer(long containerID) throws IOException {
@@ -138,17 +140,18 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
   /**
    * {@inheritDoc}
    */
-  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+  public ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException {
     Preconditions.checkState(containerID >= 0,
         "Container ID cannot be negative");
-    GetContainerWithPipelineRequestProto request = GetContainerWithPipelineRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .build();
+    GetContainerWithPipelineRequestProto request =
+        GetContainerWithPipelineRequestProto.newBuilder()
+            .setContainerID(containerID).build();
     try {
       GetContainerWithPipelineResponseProto response =
           rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
-      return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
+      return ContainerWithPipeline.fromProtobuf(
+          response.getContainerWithPipeline());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

+ 3 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java

@@ -113,8 +113,8 @@ public final class ContainerProtocolCalls  {
    * @throws IOException if there is an I/O error while performing the call
    */
   public static ContainerProtos.GetCommittedBlockLengthResponseProto
-  getCommittedBlockLength(
-      XceiverClientSpi xceiverClient, BlockID blockID, String traceID)
+      getCommittedBlockLength(
+          XceiverClientSpi xceiverClient, BlockID blockID, String traceID)
       throws IOException {
     ContainerProtos.GetCommittedBlockLengthRequestProto.Builder
         getBlockLengthRequestBuilder =
@@ -375,7 +375,7 @@ public final class ContainerProtocolCalls  {
   }
 
   /**
-   * Reads the data given the blockID
+   * Reads the data given the blockID.
    *
    * @param client
    * @param blockID - ID of the block

+ 0 - 5
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

@@ -110,11 +110,6 @@ public final class OzoneConsts {
     }
   }
 
-  /**
-   * Ozone handler types.
-   */
-  public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
-
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";

+ 5 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java

@@ -37,7 +37,8 @@ public class KeyData {
 
   /**
    * Represent a list of chunks.
-   * In order to reduce memory usage, chunkList is declared as an {@link Object}.
+   * In order to reduce memory usage, chunkList is declared as an
+   * {@link Object}.
    * When #elements == 0, chunkList is null.
    * When #elements == 1, chunkList refers to the only element.
    * When #elements > 1, chunkList refers to the list.
@@ -157,7 +158,7 @@ public class KeyData {
   }
 
   /**
-   * Adds chinkInfo to the list
+   * Adds chinkInfo to the list.
    */
   public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
     if (chunkList == null) {
@@ -237,7 +238,8 @@ public class KeyData {
     } else {
       final int n = chunks.size();
       chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
-      size = chunks.parallelStream().mapToLong(ContainerProtos.ChunkInfo::getLen).sum();
+      size = chunks.parallelStream().mapToLong(
+          ContainerProtos.ChunkInfo::getLen).sum();
     }
   }
 

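The javadoc reflowed in this hunk describes a deliberate memory optimization: one Object field standing in for zero, one, or many chunks. A minimal sketch of the same idea, using a placeholder ChunkInfo rather than the real protobuf type (ChunkHolder is hypothetical):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class ChunkHolder {
  static final class ChunkInfo {
    final long len;
    ChunkInfo(long len) { this.len = len; }
  }

  // null when empty, the element itself when size == 1, a List otherwise;
  // this avoids allocating a List in the common single-chunk case.
  private Object chunkList;

  void addChunk(ChunkInfo chunk) {
    if (chunkList == null) {
      chunkList = chunk;                       // 0 -> 1 element
    } else if (chunkList instanceof ChunkInfo) {
      final List<ChunkInfo> list = new ArrayList<>();
      list.add((ChunkInfo) chunkList);         // 1 -> many: promote to list
      list.add(chunk);
      chunkList = list;
    } else {
      castList().add(chunk);
    }
  }

  @SuppressWarnings("unchecked")
  private List<ChunkInfo> castList() {
    return (List<ChunkInfo>) chunkList;
  }

  List<ChunkInfo> getChunks() {
    return chunkList == null ? Collections.emptyList()
        : chunkList instanceof ChunkInfo
            ? Collections.singletonList((ChunkInfo) chunkList)
            : Collections.unmodifiableList(castList());
  }
}
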
+ 4 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java

@@ -36,7 +36,8 @@ import java.util.Properties;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class HddsVersionInfo {
-  private static final Logger LOG = LoggerFactory.getLogger(HddsVersionInfo.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      HddsVersionInfo.class);
 
   private Properties info;
 
@@ -95,7 +96,8 @@ public class HddsVersionInfo {
     return info.getProperty("protocVersion", "Unknown");
   }
 
-  private static HddsVersionInfo HDDS_VERSION_INFO = new HddsVersionInfo("hdds");
+  private static final HddsVersionInfo HDDS_VERSION_INFO =
+      new HddsVersionInfo("hdds");
   /**
    * Get the HDDS version.
    * @return the Hdds version string, eg. "0.6.3-dev"

+ 0 - 1
hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java

@@ -55,7 +55,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 

+ 2 - 1
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java

@@ -232,7 +232,8 @@ public class HddsDatanodeService implements ServicePlugin {
 
   public static void main(String[] args) {
     try {
-      if (DFSUtil.parseHelpArgument(args, "Starts HDDS Datanode", System.out, false)) {
+      if (DFSUtil.parseHelpArgument(
+          args, "Starts HDDS Datanode", System.out, false)) {
         System.exit(0);
       }
       Configuration conf = new OzoneConfiguration();

+ 9 - 13
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java

@@ -43,7 +43,6 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.nio.file.Path;
 import java.nio.file.Paths;
 import org.yaml.snakeyaml.Yaml;
 
@@ -54,8 +53,6 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.NO_SUCH_ALGORITHM;
 import static org.apache.hadoop.ozone.container.common.impl.ContainerData
     .CHARSET_ENCODING;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-
 
 /**
  * A set of helper functions to create proper responses.
@@ -75,14 +72,13 @@ public final class ContainerUtils {
    * @return ContainerCommand Response Builder.
    */
   public static ContainerCommandResponseProto.Builder
-  getContainerCommandResponse(
-      ContainerCommandRequestProto request, Result result, String message) {
-    return
-        ContainerCommandResponseProto.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setTraceID(request.getTraceID())
-            .setResult(result)
-            .setMessage(message);
+      getContainerCommandResponse(
+          ContainerCommandRequestProto request, Result result, String message) {
+    return ContainerCommandResponseProto.newBuilder()
+        .setCmdType(request.getCmdType())
+        .setTraceID(request.getTraceID())
+        .setResult(result)
+        .setMessage(message);
   }
 
   /**
@@ -287,7 +283,7 @@ public final class ContainerUtils {
   }
 
   /**
-   * Get the .container file from the containerBaseDir
+   * Get the .container file from the containerBaseDir.
    * @param containerBaseDir container base directory. The name of this
    *                         directory is same as the containerID
    * @return the .container file
@@ -301,7 +297,7 @@ public final class ContainerUtils {
   }
 
   /**
-   * ContainerID can be decoded from the container base directory name
+   * ContainerID can be decoded from the container base directory name.
    */
   public static long getContainerID(File containerBaseDir) {
     return Long.parseLong(containerBaseDir.getName());

+ 1 - 1
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java

@@ -132,7 +132,7 @@ public class ContainerSet {
   }
 
   /**
-   * Return a copy of the containerMap
+   * Return a copy of the containerMap.
    * @return containerMap
    */
   public Map<Long, Container> getContainerMap() {

+ 11 - 8
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java

@@ -45,10 +45,12 @@ public class OpenContainerBlockMap {
   /**
    * Map: localId -> KeyData.
    *
-   * In order to support {@link #getAll()}, the update operations are synchronized.
+   * In order to support {@link #getAll()}, the update operations are
+   * synchronized.
    */
   static class KeyDataMap {
-    private final ConcurrentMap<Long, KeyData> blocks = new ConcurrentHashMap<>();
+    private final ConcurrentMap<Long, KeyData> blocks =
+        new ConcurrentHashMap<>();
 
     KeyData get(long localId) {
       return blocks.get(localId);
@@ -59,7 +61,8 @@ public class OpenContainerBlockMap {
       return blocks.size();
     }
 
-    synchronized KeyData computeIfAbsent(long localId, Function<Long, KeyData> f) {
+    synchronized KeyData computeIfAbsent(
+        long localId, Function<Long, KeyData> f) {
       return blocks.computeIfAbsent(localId, f);
     }
 
@@ -76,7 +79,8 @@ public class OpenContainerBlockMap {
    *
    * For now, we will track all open blocks of a container in the blockMap.
    */
-  private final ConcurrentMap<Long, KeyDataMap> containers = new ConcurrentHashMap<>();
+  private final ConcurrentMap<Long, KeyDataMap> containers =
+      new ConcurrentHashMap<>();
 
   /**
    * Removes the Container matching with specified containerId.
@@ -109,7 +113,7 @@ public class OpenContainerBlockMap {
   }
 
   /**
-   * returns the list of open to the openContainerBlockMap
+   * Returns the list of open to the openContainerBlockMap.
    * @param containerId container id
    * @return List of open Keys(blocks)
    */
@@ -130,15 +134,14 @@ public class OpenContainerBlockMap {
   }
 
   /**
-   * Returns true if the block exists in the map, false otherwise
+   * Returns true if the block exists in the map, false otherwise.
    *
    * @param blockID
    * @return True, if it exists, false otherwise
    */
   public boolean checkIfBlockExists(BlockID blockID) {
     KeyDataMap keyDataMap = containers.get(blockID.getContainerID());
-    return keyDataMap == null ? false :
-        keyDataMap.get(blockID.getLocalID()) != null;
+    return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null;
   }
 
   @VisibleForTesting

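The KeyDataMap javadoc in this hunk explains the concurrency pattern being reformatted: reads hit the ConcurrentMap directly, while mutating operations are synchronized so getAll() can take a consistent snapshot under the same lock. A hedged, generic sketch of that pattern (SyncSnapshotMap is illustrative, not the Ozone class):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

class SyncSnapshotMap<V> {
  private final ConcurrentMap<Long, V> blocks = new ConcurrentHashMap<>();

  V get(long localId) {                 // lock-free read path
    return blocks.get(localId);
  }

  synchronized V computeIfAbsent(long localId, Function<Long, V> f) {
    return blocks.computeIfAbsent(localId, f);
  }

  synchronized V remove(long localId) {
    return blocks.remove(localId);
  }

  synchronized List<V> getAll() {       // snapshot consistent with updates
    return new ArrayList<>(blocks.values());
  }
}
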
+ 3 - 3
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java

@@ -71,10 +71,10 @@ import java.util.concurrent.atomic.AtomicLong;
  */
 public final class XceiverServerRatis implements XceiverServerSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverServerRatis.class);
-  private static final AtomicLong callIdCounter = new AtomicLong();
+  private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();
 
   private static long nextCallId() {
-    return callIdCounter.getAndIncrement() & Long.MAX_VALUE;
+    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
   }
 
   private final int port;
@@ -307,6 +307,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
       RaftClientRequest.Type type) {
     return new RaftClientRequest(clientId, server.getId(),
         PipelineID.getFromProtobuf(pipelineID).getRaftGroupID(),
-        nextCallId(),0, Message.valueOf(request.toByteString()), type);
+        nextCallId(), 0, Message.valueOf(request.toByteString()), type);
   }
 }

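A small aside on the call-id scheme renamed in this hunk: AND-ing the counter with Long.MAX_VALUE clears the sign bit, so ids stay non-negative even after the counter wraps. A self-contained sketch demonstrating that behavior (the main method is illustrative):

import java.util.concurrent.atomic.AtomicLong;

final class CallIds {
  private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();

  static long nextCallId() {
    // Masking with Long.MAX_VALUE keeps the result in [0, Long.MAX_VALUE].
    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
  }

  public static void main(String[] args) {
    CALL_ID_COUNTER.set(Long.MAX_VALUE);       // force wraparound
    System.out.println(nextCallId());          // Long.MAX_VALUE
    System.out.println(nextCallId());          // 0, not Long.MIN_VALUE
  }
}
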
+ 2 - 2
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java

@@ -67,8 +67,8 @@ public interface KeyManager {
    * @param count    - Number of keys to return.
    * @return List of Keys that match the criteria.
    */
-  List<KeyData> listKey(Container container, long startLocalID, int count) throws
-      IOException;
+  List<KeyData> listKey(Container container, long startLocalID, int count)
+      throws IOException;
 
   /**
    * Returns the last committed block length for the block.

+ 8 - 8
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java

@@ -109,23 +109,23 @@ public class CommandStatus {
       return new CommandStatusBuilder();
     }
 
-    public CommandStatusBuilder setType(Type type) {
-      this.type = type;
+    public CommandStatusBuilder setType(Type commandType) {
+      this.type = commandType;
       return this;
     }
 
-    public CommandStatusBuilder setCmdId(Long cmdId) {
-      this.cmdId = cmdId;
+    public CommandStatusBuilder setCmdId(Long commandId) {
+      this.cmdId = commandId;
       return this;
     }
 
-    public CommandStatusBuilder setStatus(Status status) {
-      this.status = status;
+    public CommandStatusBuilder setStatus(Status commandStatus) {
+      this.status = commandStatus;
       return this;
     }
 
-    public CommandStatusBuilder setMsg(String msg) {
-      this.msg = msg;
+    public CommandStatusBuilder setMsg(String message) {
+      this.msg = message;
       return this;
     }
 

+ 4 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java

@@ -193,11 +193,13 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     rpcCount.incrementAndGet();
     heartbeatCount.incrementAndGet();
     if(heartbeat.hasCommandStatusReport()){
-      cmdStatusList.addAll(heartbeat.getCommandStatusReport().getCmdStatusList());
+      cmdStatusList.addAll(heartbeat.getCommandStatusReport()
+          .getCmdStatusList());
       commandStatusReport.incrementAndGet();
     }
     sleepIfNeeded();
-    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(scmCommandRequests)
+    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(
+        scmCommandRequests)
         .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid())
         .build();
   }

+ 0 - 7
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java

@@ -19,17 +19,12 @@
 package org.apache.hadoop.ozone.container.common.interfaces;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
-import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -37,8 +32,6 @@ import org.junit.rules.TestRule;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
-import java.util.UUID;
-
 /**
  * Tests Handler interface.
  */

+ 0 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.ozone.container.common.states.endpoint;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
 import org.apache.hadoop.hdds.protocol.proto

+ 2 - 3
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
@@ -43,8 +42,8 @@ public class TestRoundRobinVolumeChoosingPolicy {
   private List<HddsVolume> volumes;
 
   private final String baseDir = MiniDFSCluster.getBaseDirectory();
-	private final String volume1 = baseDir + "disk1";
-	private final String volume2 = baseDir + "disk2";
+  private final String volume1 = baseDir + "disk1";
+  private final String volume2 = baseDir + "disk2";
   private static final String DUMMY_IP_ADDR = "0.0.0.0";
 
   @Before

+ 2 - 1
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java

@@ -62,7 +62,8 @@ public class TestOzoneContainer {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
         .getAbsolutePath());
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().getAbsolutePath());
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+        folder.newFolder().getAbsolutePath());
   }
 
   @Test

+ 4 - 2
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java

@@ -180,9 +180,11 @@ public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
 
   }
 
-  protected abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+  protected abstract void onTimeout(
+      EventPublisher publisher, TIMEOUT_PAYLOAD payload);
 
-  protected abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+  protected abstract void onFinished(
+      EventPublisher publisher, TIMEOUT_PAYLOAD payload);
 
   public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
       Predicate<? super TIMEOUT_PAYLOAD> predicate) {

+ 0 - 3
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java

@@ -22,9 +22,6 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.Set;
-import java.util.stream.Collectors;
-
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 
 /**

+ 9 - 9
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java

@@ -161,11 +161,11 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     lock.lock();
     try {
       for (int i = 0; i < count; i++) {
-        ContainerWithPipeline containerWithPipeline = null;
+        ContainerWithPipeline containerWithPipeline;
         try {
           // TODO: Fix this later when Ratis is made the Default.
-          containerWithPipeline = containerManager.allocateContainer(type, factor,
-              owner);
+          containerWithPipeline = containerManager.allocateContainer(
+              type, factor, owner);
 
           if (containerWithPipeline == null) {
             LOG.warn("Unable to allocate container.");
@@ -293,12 +293,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
 
   private String getChannelName(ReplicationType type) {
     switch (type) {
-      case RATIS:
-        return "RA" + UUID.randomUUID().toString().substring(3);
-      case STAND_ALONE:
-        return "SA" + UUID.randomUUID().toString().substring(3);
-      default:
-        return "RA" + UUID.randomUUID().toString().substring(3);
+    case RATIS:
+      return "RA" + UUID.randomUUID().toString().substring(3);
+    case STAND_ALONE:
+      return "SA" + UUID.randomUUID().toString().substring(3);
+    default:
+      return "RA" + UUID.randomUUID().toString().substring(3);
     }
   }
 

+ 2 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java

@@ -232,7 +232,8 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
     lock.lock();
     try {
       Set<UUID> dnsWithCommittedTxn;
-      for (DeleteBlockTransactionResult transactionResult : transactionResults) {
+      for (DeleteBlockTransactionResult transactionResult :
+          transactionResults) {
         if (isTransactionFailed(transactionResult)) {
           continue;
         }

+ 2 - 2
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java

@@ -109,8 +109,8 @@ public class SCMBlockDeletingService extends BackgroundService {
 
   public void handlePendingDeletes(PendingDeleteStatusList deletionStatusList) {
     DatanodeDetails dnDetails = deletionStatusList.getDatanodeDetails();
-    for (PendingDeleteStatusList.PendingDeleteStatus deletionStatus : deletionStatusList
-        .getPendingDeleteStatuses()) {
+    for (PendingDeleteStatusList.PendingDeleteStatus deletionStatus :
+        deletionStatusList.getPendingDeleteStatuses()) {
       LOG.info(
           "Block deletion txnID mismatch in datanode {} for containerID {}."
               + " Datanode delete txnID: {}, SCM txnID: {}",

+ 2 - 2
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java

@@ -62,8 +62,8 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
           containerManager.getContainerWithPipeline(containerID.getId());
       info = containerWithPipeline.getContainerInfo();
       if (info == null) {
-        LOG.error("Failed to update the container state. Container with id : {} "
-            + "does not exist", containerID.getId());
+        LOG.error("Failed to update the container state. Container with id : {}"
+            + " does not exist", containerID.getId());
         return;
       }
     } catch (IOException e) {

+ 2 - 2
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java

@@ -600,8 +600,8 @@ public class ContainerMapping implements Mapping {
         .setReplicationType(knownState.getReplicationType())
         .setReplicationFactor(knownState.getReplicationFactor());
 
-    // TODO: If current state doesn't have this DN in list of DataNodes with replica
-    // then add it in list of replicas.
+    // TODO: If current state doesn't have this DN in list of DataNodes with
+    // replica then add it in list of replicas.
 
     // If used size is greater than allocated size, we will be updating
     // allocated size with used size. This update is done as a fallback

+ 4 - 3
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java

@@ -288,9 +288,10 @@ public class ContainerStateManager implements Closeable {
    * @return ContainerWithPipeline
    * @throws IOException  on Failure.
    */
-  public ContainerWithPipeline allocateContainer(PipelineSelector selector, HddsProtos
-      .ReplicationType type, HddsProtos.ReplicationFactor replicationFactor,
-      String owner) throws IOException {
+  public ContainerWithPipeline allocateContainer(PipelineSelector selector,
+      HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor replicationFactor, String owner)
+      throws IOException {
 
     Pipeline pipeline = selector.getReplicationPipeline(type,
         replicationFactor);

+ 1 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java

@@ -182,7 +182,7 @@ public class ReplicationManager implements Runnable {
   }
 
   /**
-   * Event for the ReplicationCommandWatcher to repeate the embedded request
+   * Event for the ReplicationCommandWatcher to repeate the embedded request.
    * in case fof timeout.
    */
   public static class ReplicationRequestToRepeat

+ 1 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java

@@ -381,7 +381,7 @@ public class ContainerStateMap {
   }
 
   /**
-   * Returns Open containers in the SCM by the Pipeline
+   * Returns Open containers in the SCM by the Pipeline.
    *
    * @param pipelineID - Pipeline id.
    * @return NavigableSet<ContainerID>

+ 2 - 2
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java

@@ -84,8 +84,8 @@ public class Node2ContainerMap {
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void setContainersForDatanode(UUID datanodeID, Set<ContainerID> containers)
-      throws SCMException {
+  public void setContainersForDatanode(UUID datanodeID,
+      Set<ContainerID> containers) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(containers);
     if (dn2ContainerMap

+ 0 - 2
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java

@@ -17,8 +17,6 @@
 
 package org.apache.hadoop.hdds.scm.node.states;
 
-import java.io.IOException;
-
 /**
  * This exception represents that the node that is being accessed does not
  * exist in NodeStateMap.

+ 2 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java

@@ -94,7 +94,8 @@ public class ReportResult {
       if (nullSafeMissingContainers == null) {
         nullSafeMissingContainers = Collections.emptySet();
       }
-      return new ReportResult(status, nullSafeMissingContainers, nullSafeNewContainers);
+      return new ReportResult(status, nullSafeMissingContainers,
+          nullSafeNewContainers);
     }
   }
 }

+ 22 - 28
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java

@@ -23,39 +23,31 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.Map;
-import java.util.HashSet;
-import java.util.Collections;
-
 import java.util.concurrent.ConcurrentHashMap;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .DUPLICATE_DATANODE;
-
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
 
 /**
- * This data structure maintains the list of pipelines which the given datanode
- * is a part of.
- * This information will be added whenever a new pipeline allocation happens.
+ * This data structure maintains the list of pipelines which the given datanode is a part of. This
+ * information will be added whenever a new pipeline allocation happens.
  *
- * TODO: this information needs to be regenerated from pipeline reports on
- * SCM restart
+ * <p>TODO: this information needs to be regenerated from pipeline reports on SCM restart
  */
 public class Node2PipelineMap {
   private final Map<UUID, Set<Pipeline>> dn2PipelineMap;
 
-  /**
-   * Constructs a Node2PipelineMap Object.
-   */
+  /** Constructs a Node2PipelineMap Object. */
   public Node2PipelineMap() {
     dn2PipelineMap = new ConcurrentHashMap<>();
   }
 
   /**
-   * Returns true if this a datanode that is already tracked by
-   * Node2PipelineMap.
+   * Returns true if this a datanode that is already tracked by Node2PipelineMap.
    *
    * @param datanodeID - UUID of the Datanode.
    * @return True if this is tracked, false if this map does not know about it.
@@ -71,18 +63,17 @@ public class Node2PipelineMap {
    * @param datanodeID -- Datanode UUID
    * @param pipelines - set of pipelines.
    */
-  private void insertNewDatanode(UUID datanodeID, Set<Pipeline> pipelines)
-      throws SCMException {
+  private void insertNewDatanode(UUID datanodeID, Set<Pipeline> pipelines) throws SCMException {
     Preconditions.checkNotNull(pipelines);
     Preconditions.checkNotNull(datanodeID);
-    if(dn2PipelineMap.putIfAbsent(datanodeID, pipelines) != null) {
-      throw new SCMException("Node already exists in the map",
-          DUPLICATE_DATANODE);
+    if (dn2PipelineMap.putIfAbsent(datanodeID, pipelines) != null) {
+      throw new SCMException("Node already exists in the map", DUPLICATE_DATANODE);
     }
   }
 
   /**
    * Removes datanode Entry from the map.
+   *
    * @param datanodeID - Datanode ID.
    */
   public synchronized void removeDatanode(UUID datanodeID) {
@@ -98,20 +89,19 @@ public class Node2PipelineMap {
    */
   public Set<Pipeline> getPipelines(UUID datanode) {
     Preconditions.checkNotNull(datanode);
-    return dn2PipelineMap.computeIfPresent(datanode, (k, v) ->
-        Collections.unmodifiableSet(v));
+    return dn2PipelineMap.computeIfPresent(datanode, (k, v) -> Collections.unmodifiableSet(v));
   }
 
   /**
    * Adds a pipeline entry to a given dataNode in the map.
+   *
    * @param pipeline Pipeline to be added
    */
   public synchronized void addPipeline(Pipeline pipeline) {
     for (DatanodeDetails details : pipeline.getDatanodes().values()) {
       UUID dnId = details.getUuid();
       dn2PipelineMap
-          .computeIfAbsent(dnId,
-              k -> Collections.synchronizedSet(new HashSet<>()))
+          .computeIfAbsent(dnId, k -> Collections.synchronizedSet(new HashSet<>()))
           .add(pipeline);
     }
   }
@@ -119,8 +109,12 @@ public class Node2PipelineMap {
   public synchronized void removePipeline(Pipeline pipeline) {
     for (DatanodeDetails details : pipeline.getDatanodes().values()) {
       UUID dnId = details.getUuid();
-      dn2PipelineMap.computeIfPresent(dnId,
-          (k, v) -> {v.remove(pipeline); return v;});
+      dn2PipelineMap.computeIfPresent(
+          dnId,
+          (k, v) -> {
+            v.remove(pipeline);
+            return v;
+          });
     }
   }
 

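The class javadoc in this hunk describes a concurrent multimap from datanode UUID to the pipelines it participates in, built with computeIfAbsent creating a synchronized value set on first use. A stripped-down sketch of that structure, with String standing in for the real Pipeline type (NodeToPipelines is hypothetical):

import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

class NodeToPipelines {
  private final Map<UUID, Set<String>> dn2PipelineMap =
      new ConcurrentHashMap<>();

  synchronized void addPipeline(UUID dnId, String pipeline) {
    dn2PipelineMap
        .computeIfAbsent(dnId,
            k -> Collections.synchronizedSet(new HashSet<>()))
        .add(pipeline);
  }

  synchronized void removePipeline(UUID dnId, String pipeline) {
    dn2PipelineMap.computeIfPresent(dnId, (k, v) -> {
      v.remove(pipeline);
      return v;
    });
  }

  Set<String> getPipelines(UUID dnId) {
    // Expose a read-only view; callers cannot mutate the underlying set.
    return dn2PipelineMap.computeIfPresent(
        dnId, (k, v) -> Collections.unmodifiableSet(v));
  }
}
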
+ 3 - 3
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java

@@ -111,7 +111,7 @@ public abstract class PipelineManager {
       ReplicationFactor replicationFactor);
 
   /**
-   * Initialize the pipeline
+   * Initialize the pipeline.
    * TODO: move the initialization to Ozone Client later
    */
   public abstract void initializePipeline(Pipeline pipeline) throws IOException;
@@ -176,7 +176,7 @@ public abstract class PipelineManager {
   }
 
   /**
-   * Remove the pipeline from active allocation
+   * Remove the pipeline from active allocation.
    * @param pipeline pipeline to be finalized
    */
   public synchronized void finalizePipeline(Pipeline pipeline) {
@@ -193,7 +193,7 @@ public abstract class PipelineManager {
   }
 
   /**
-   * list members in the pipeline .
+   * list members in the pipeline.
    * @return the datanode
    */
   public abstract List<DatanodeDetails> getMembers(PipelineID pipelineID)

+ 4 - 3
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java

@@ -126,7 +126,7 @@ public class PipelineSelector {
   }
 
   /**
-   * Event and State Transition Mapping:
+   * Event and State Transition Mapping.
    *
    * State: ALLOCATED ---------------> CREATING
    * Event:                CREATE
@@ -293,7 +293,7 @@ public class PipelineSelector {
       pipeline = manager.getPipeline(replicationFactor, replicationType);
     } else {
       // if a new pipeline is created, initialize its state machine
-      updatePipelineState(pipeline,HddsProtos.LifeCycleEvent.CREATE);
+      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATE);
 
       //TODO: move the initialization of pipeline to Ozone Client
       manager.initializePipeline(pipeline);
@@ -334,7 +334,8 @@ public class PipelineSelector {
   /**
    * Close a given pipeline.
    */
-  public void closePipelineIfNoOpenContainers(Pipeline pipeline) throws IOException {
+  public void closePipelineIfNoOpenContainers(Pipeline pipeline)
+      throws IOException {
     if (pipeline.getLifeCycleState() != LifeCycleState.CLOSING) {
       return;
     }

+ 2 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java

@@ -165,7 +165,8 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
-  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+  public ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException {
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
     return scm.getScmContainerManager()

+ 4 - 4
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java

@@ -74,7 +74,7 @@ public final class TestUtils {
         + "." + random.nextInt(256)
         + "." + random.nextInt(256)
         + "." + random.nextInt(256);
-   return createDatanodeDetails(uuid.toString(), "localhost", ipAddress);
+    return createDatanodeDetails(uuid.toString(), "localhost", ipAddress);
   }
 
   /**
@@ -259,12 +259,12 @@ public final class TestUtils {
     StorageTypeProto storageTypeProto =
         type == null ? StorageTypeProto.DISK : type;
     srb.setStorageType(storageTypeProto);
-   return srb.build();
+    return srb.build();
   }
 
 
   /**
-   * Generates random container reports
+   * Generates random container reports.
    *
    * @return ContainerReportsProto
    */
@@ -281,7 +281,7 @@ public final class TestUtils {
    */
   public static ContainerReportsProto getRandomContainerReports(
       int numberOfContainers) {
-   List<ContainerInfo> containerInfos = new ArrayList<>();
+    List<ContainerInfo> containerInfos = new ArrayList<>();
     for (int i = 0; i < numberOfContainers; i++) {
       containerInfos.add(getRandomContainerInfo(i));
     }

+ 0 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java

@@ -39,7 +39,6 @@ import org.junit.rules.ExpectedException;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
-import java.util.Collections;
 
 import static org.apache.hadoop.ozone.OzoneConsts.GB;
 import static org.apache.hadoop.ozone.OzoneConsts.MB;

+ 4 - 3
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java

@@ -102,8 +102,8 @@ public class TestDeletedBlockLog {
     ContainerInfo containerInfo =
         new ContainerInfo.Builder().setContainerID(1).build();
     Pipeline pipeline =
-        new Pipeline(null, LifeCycleState.CLOSED, ReplicationType.RATIS,
-            ReplicationFactor.THREE, null);
+        new Pipeline(null, LifeCycleState.CLOSED,
+            ReplicationType.RATIS, ReplicationFactor.THREE, null);
     pipeline.addMember(dnList.get(0));
     pipeline.addMember(dnList.get(1));
     pipeline.addMember(dnList.get(2));
@@ -379,7 +379,8 @@ public class TestDeletedBlockLog {
     Assert.assertTrue(transactions.isFull());
   }
 
-  private void mockContainerInfo(long containerID, DatanodeDetails dd) throws IOException {
+  private void mockContainerInfo(long containerID, DatanodeDetails dd)
+      throws IOException {
     Pipeline pipeline =
         new Pipeline("fake", LifeCycleState.OPEN,
             ReplicationType.STAND_ALONE, ReplicationFactor.ONE,

+ 10 - 12
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java

@@ -48,12 +48,11 @@ import static org.junit.Assert.assertFalse;
 
 public class TestCommandStatusReportHandler implements EventPublisher {
 
-  private static Logger LOG = LoggerFactory
+  private static final Logger LOG = LoggerFactory
       .getLogger(TestCommandStatusReportHandler.class);
   private CommandStatusReportHandler cmdStatusReportHandler;
   private String storagePath = GenericTestUtils.getRandomizedTempPath()
       .concat("/" + UUID.randomUUID().toString());
-  ;
 
   @Before
   public void setup() {
@@ -69,10 +68,9 @@ public class TestCommandStatusReportHandler implements EventPublisher {
         .emptyList());
     cmdStatusReportHandler.onMessage(report, this);
     assertFalse(logCapturer.getOutput().contains("DeleteBlockCommandStatus"));
-    assertFalse(logCapturer.getOutput().contains
-        ("CloseContainerCommandStatus"));
-    assertFalse(logCapturer.getOutput().contains
-        ("ReplicateCommandStatus"));
+    assertFalse(logCapturer.getOutput().contains(
+        "CloseContainerCommandStatus"));
+    assertFalse(logCapturer.getOutput().contains("ReplicateCommandStatus"));
 
 
     report = this.getStatusReport(this.getCommandStatusList());
@@ -93,13 +91,13 @@ public class TestCommandStatusReportHandler implements EventPublisher {
 
   }
 
-  private CommandStatusReportFromDatanode getStatusReport(List<CommandStatus>
-      reports) {
-    CommandStatusReportsProto report = TestUtils.createCommandStatusReport
-        (reports);
+  private CommandStatusReportFromDatanode getStatusReport(
+      List<CommandStatus> reports) {
+    CommandStatusReportsProto report = TestUtils.createCommandStatusReport(
+        reports);
     DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-    return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode
-        (dn, report);
+    return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode(
+        dn, report);
   }
 
   @Override
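
Three small fixes land in this file: the logger becomes static final (a non-final static named LOG trips checkstyle's naming rules), a stray semicolon left after a field initializer is deleted, and calls such as contains(...) keep the opening parenthesis attached to the method name, wrapping only the argument. A short sketch, assuming slf4j on the classpath as in the tests above:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class LoggerExample {

      // Loggers are per-class constants: private static final.
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggerExample.class);

      private LoggerExample() {
      }

      public static void main(String[] args) {
        String output = "ReplicateCommandStatus seen";
        // The parenthesis stays with the method name; only the argument wraps.
        LOG.info("marker present: {}", output.contains(
            "ReplicateCommandStatus"));
      }
    }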

+ 0 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java

@@ -37,7 +37,6 @@ import org.junit.Test;
 import java.io.File;
 import java.io.IOException;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;

+ 4 - 3
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java

@@ -178,8 +178,8 @@ public class TestContainerMapping {
     mapping
         .updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CLOSE);
     ContainerInfo finalContInfo = contInfo;
-    LambdaTestUtils.intercept(SCMException.class,"No entry exist for "
-        + "containerId:" , () -> mapping.getContainerWithPipeline(
+    LambdaTestUtils.intercept(SCMException.class, "No entry exist for "
+        + "containerId:", () -> mapping.getContainerWithPipeline(
         finalContInfo.getContainerID()));
 
     mapping.getStateManager().getContainerStateMap()
@@ -376,7 +376,8 @@ public class TestContainerMapping {
   @Test
   public void testFlushAllContainers() throws IOException {
     ContainerInfo info = createContainer();
-    List<ContainerInfo> containers = mapping.getStateManager().getAllContainers();
+    List<ContainerInfo> containers = mapping.getStateManager()
+        .getAllContainers();
     Assert.assertTrue(containers.size() > 0);
     mapping.flushContainerInfo();
   }

+ 1 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java

@@ -86,7 +86,7 @@ public class TestContainerReportHandler implements EventPublisher {
                     .setContainerID((Long) invocation.getArguments()[0])
                     .setState(LifeCycleState.CLOSED)
                     .build()
-        );
+      );
 
     ContainerStateManager containerStateManager =
         new ContainerStateManager(conf, mapping);

+ 4 - 4
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java

@@ -52,13 +52,13 @@ public class TestSCMContainerPlacementCapacity {
         .thenReturn(new ArrayList<>(datanodes));
 
     when(mockNodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(100l, 0L, 100L));
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
     when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100l, 90L, 10L));
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
     when(mockNodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(100l, 80L, 20L));
+        .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
     when(mockNodeManager.getNodeStat(datanodes.get(4)))
-        .thenReturn(new SCMNodeMetric(100l, 70L, 30L));
+        .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
 
     SCMContainerPlacementCapacity scmContainerPlacementRandom =
         new SCMContainerPlacementCapacity(mockNodeManager, conf);
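
100l and 100L denote the same long literal, but the lowercase suffix is easily misread as the digit one (compare 100l with 1001), which is why checkstyle's UpperEll check rejects it. For instance:

    public final class LongLiteralExample {

      private LongLiteralExample() {
      }

      public static void main(String[] args) {
        long capacity = 100L;  // uppercase suffix; 100l compiles but misreads
        long used = 90L;
        System.out.println(capacity - used);  // prints 10
      }
    }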

+ 2 - 2
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java

@@ -51,9 +51,9 @@ public class TestSCMContainerPlacementRandom {
         .thenReturn(new ArrayList<>(datanodes));
 
     when(mockNodeManager.getNodeStat(anyObject()))
-        .thenReturn(new SCMNodeMetric(100l, 0l, 100l));
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
     when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100l, 90l, 10l));
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
 
     SCMContainerPlacementRandom scmContainerPlacementRandom =
         new SCMContainerPlacementRandom(mockNodeManager, conf);

+ 4 - 7
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java

@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -132,7 +131,7 @@ public class TestReplicationManager {
       //WHEN
 
       queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-          new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+          new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
               (short) 3));
 
       Thread.sleep(500L);
@@ -159,10 +158,8 @@ public class TestReplicationManager {
       leaseManager.start();
 
       ReplicationManager replicationManager =
-          new ReplicationManager(containerPlacementPolicy, containerStateManager,
-
-
-              queue, leaseManager) {
+          new ReplicationManager(containerPlacementPolicy,
+              containerStateManager, queue, leaseManager) {
             @Override
             protected List<DatanodeDetails> getCurrentReplicas(
                 ReplicationRequest request) throws IOException {
@@ -172,7 +169,7 @@ public class TestReplicationManager {
       replicationManager.start();
 
       queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-          new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+          new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
               (short) 3));
 
       Thread.sleep(500L);
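
Besides the 1l-to-1L literal fixes, this change tidies a constructor call that had two stray blank lines in the middle of its argument list. The hunk also shows the test's stubbing pattern: ReplicationManager#getCurrentReplicas is overridden in an anonymous subclass so no real replicas are fetched. A self-contained analogue of that pattern, with hypothetical names throughout:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    public final class AnonymousStubExample {

      /** Stands in for a class with an overridable data-fetching hook. */
      static class ReplicaSource {
        protected List<String> getCurrentReplicas(long containerId)
            throws IOException {
          throw new IOException("would contact real datanodes");
        }
      }

      public static void main(String[] args) throws IOException {
        // Override the hook in an anonymous subclass, as the test does
        // with ReplicationManager#getCurrentReplicas.
        ReplicaSource stubbed = new ReplicaSource() {
          @Override
          protected List<String> getCurrentReplicas(long containerId) {
            return Collections.singletonList("dn-0");
          }
        };
        System.out.println(stubbed.getCurrentReplicas(1L));  // [dn-0]
      }
    }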

+ 2 - 2
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java

@@ -92,8 +92,8 @@ public class TestReplicationQueue {
         1, replicationQueue.size());
     Assert.assertEquals(temp, msg5);
 
-    // Message 2 should be ordered before message 5 as both have same replication
-    // number but message 2 has earlier timestamp.
+    // Message 2 should be ordered before message 5 as both have same
+    // replication number but message 2 has earlier timestamp.
     temp = replicationQueue.take();
     Assert.assertEquals("Should have 0 objects",
         replicationQueue.size(), 0);

+ 2 - 3
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java

@@ -32,8 +32,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -158,7 +156,8 @@ public class TestContainerPlacement {
 
       assertTrue(nodeManager.isOutOfChillMode());
 
-      ContainerWithPipeline containerWithPipeline = containerManager.allocateContainer(
+      ContainerWithPipeline containerWithPipeline = containerManager
+          .allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(), "OZONE");
       assertEquals(xceiverClientManager.getFactor().getNumber(),

+ 2 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java

@@ -424,7 +424,8 @@ public class TestNodeManager {
       List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
 
 
-      DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister(nodeManager);
+      DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister(
+          nodeManager);
 
       // Heartbeat once
       nodeManager.processHeartbeat(staleNode);

+ 1 - 2
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java

@@ -37,13 +37,12 @@ import org.slf4j.LoggerFactory;
 
 public class TestNodeReportHandler implements EventPublisher {
 
-  private static Logger LOG = LoggerFactory
+  private static final Logger LOG = LoggerFactory
       .getLogger(TestNodeReportHandler.class);
   private NodeReportHandler nodeReportHandler;
   private SCMNodeManager nodeManager;
   private String storagePath = GenericTestUtils.getRandomizedTempPath()
       .concat("/" + UUID.randomUUID().toString());
-  ;
 
   @Before
   public void resetEventCollector() throws IOException {

+ 5 - 4
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java

@@ -121,7 +121,7 @@ public class TestEndPoint {
     config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     config
         .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,"1s");
+    config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
   }
 
   @Test
@@ -228,8 +228,8 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         nonExistentServerAddress, 1000)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
-          conf, null);
+      OzoneContainer ozoneContainer = new OzoneContainer(
+          TestUtils.randomDatanodeDetails(), conf, null);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
       EndpointStateMachine.EndPointStates newState = versionTask.call();
@@ -405,7 +405,8 @@ public class TestEndPoint {
       assertEquals(0, scmServerImpl.getCommandStatusReportCount());
 
       // Send heartbeat again from heartbeat endpoint task
-      final StateContext stateContext = heartbeatTaskHelper(serverAddress, 3000);
+      final StateContext stateContext = heartbeatTaskHelper(
+          serverAddress, 3000);
       Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
       assertNotNull(map);
       assertEquals("Should have 3 objects", 3, map.size());

+ 4 - 2
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java

@@ -87,11 +87,13 @@ public class TestContainerPlacement {
     for (int x = 0; x < opsCount; x++) {
       long containerSize = random.nextInt(100) * OzoneConsts.GB;
       List<DatanodeDetails> nodesCapacity =
-          capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired, containerSize);
+          capacityPlacer.chooseDatanodes(new ArrayList<>(), nodesRequired,
+              containerSize);
       assertEquals(nodesRequired, nodesCapacity.size());
 
       List<DatanodeDetails> nodesRandom =
-          randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired, containerSize);
+          randomPlacer.chooseDatanodes(nodesCapacity, nodesRequired,
+              containerSize);
 
       // One fifth of all calls are delete
       if (x % 5 == 0) {

+ 3 - 4
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.client;
 
-import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
@@ -158,9 +157,9 @@ public class ObjectStore {
     private OzoneVolume currentValue;
 
     /**
-     * Creates an Iterator to iterate over all volumes after prevVolume of the user.
-     * If prevVolume is null it iterates from the first volume. The returned volumes
-     * match volume prefix.
+     * Creates an Iterator to iterate over all volumes after
+     * prevVolume of the user. If prevVolume is null it iterates from the
+     * first volume. The returned volumes match volume prefix.
      * @param user user name
      * @param volPrefix volume prefix to match
      */

+ 0 - 4
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java

@@ -28,10 +28,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -39,7 +36,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.NavigableSet;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos

+ 3 - 5
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
@@ -54,9 +53,9 @@ public interface RatisTestHelper {
      *   OZONE_ENABLED = true
      *   RATIS_ENABLED = true
      */
-    public RatisTestSuite(final Class<?> clazz)
+    public RatisTestSuite()
         throws IOException, TimeoutException, InterruptedException {
-      conf = newOzoneConfiguration(clazz, RPC);
+      conf = newOzoneConfiguration(RPC);
       cluster = newMiniOzoneCluster(NUM_DATANODES, conf);
     }
 
@@ -84,8 +83,7 @@ public interface RatisTestHelper {
     }
   }
 
-  static OzoneConfiguration newOzoneConfiguration(
-      Class<?> clazz, RpcType rpc) {
+  static OzoneConfiguration newOzoneConfiguration(RpcType rpc) {
     final OzoneConfiguration conf = new OzoneConfiguration();
     initRatisConf(rpc, conf);
     return conf;

+ 0 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java

@@ -27,9 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

+ 1 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java

@@ -73,8 +73,7 @@ public class TestCloseContainerHandlingByClient {
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   * Ozone is made active by setting OZONE_ENABLED = true
    *
    * @throws IOException
    */

+ 0 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java

@@ -38,12 +38,10 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.slf4j.Logger;

+ 0 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java

@@ -75,7 +75,6 @@ import java.util.UUID;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Stage.COMBINED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;

+ 1 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java

@@ -53,8 +53,7 @@ public class TestOzoneContainerRatis {
   public Timeout testTimeout = new Timeout(300000);
 
   static OzoneConfiguration newOzoneConfiguration() {
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    return conf;
+    return new OzoneConfiguration();
   }
 
   private static void runTestOzoneContainerViaDataNodeRatis(
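
A single-use local is inlined here: constructing the configuration and returning it in one expression says the same thing in one line instead of two. The same micro-refactoring in isolation, with hypothetical names:

    public final class DirectReturnExample {

      private DirectReturnExample() {
      }

      // Before:
      //   final StringBuilder buf = new StringBuilder();
      //   return buf;
      static StringBuilder newBuffer() {
        return new StringBuilder();
      }

      public static void main(String[] args) {
        System.out.println(newBuffer().append("ok"));
      }
    }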

+ 1 - 3
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.junit.Ignore;
@@ -46,8 +45,7 @@ public class TestRatisManager {
       TestRatisManager.class);
 
   static OzoneConfiguration newOzoneConfiguration() {
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    return conf;
+    return new OzoneConfiguration();
   }
 
 

+ 0 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java

@@ -30,8 +30,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import java.io.IOException;
-
 /**
  * Test allocate container calls.
  */

+ 0 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.methods.HttpPost;

+ 1 - 4
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -95,8 +94,6 @@ public class TestBuckets {
       InterruptedException {
     conf = new OzoneConfiguration();
 
-    String path = GenericTestUtils
-        .getTempPath(TestBuckets.class.getSimpleName());
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -128,7 +125,7 @@ public class TestBuckets {
   }
 
   static void runTestCreateBucket(ClientProtocol client)
-      throws OzoneException, IOException, ParseException {
+      throws IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
         .setOwner("bilbo")
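
Two cleanups in this file: a path local (and the GenericTestUtils import feeding it) was computed but never used, and runTestCreateBucket's throws clause shrinks to IOException, the only exception its body can still raise. A compilable sketch of the narrowed-throws idea, with a hypothetical method body:

    import java.io.IOException;

    public final class NarrowThrowsExample {

      private NarrowThrowsExample() {
      }

      // Before: throws OzoneException, IOException, ParseException.
      // After: only the exception that can actually propagate is declared.
      static void runTestCreateBucket() throws IOException {
        throw new IOException("illustrative failure path");
      }

      public static void main(String[] args) {
        try {
          runTestCreateBucket();
        } catch (IOException e) {
          System.out.println("caught: " + e.getMessage());
        }
      }
    }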

+ 1 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java

@@ -61,7 +61,7 @@ public class TestBucketsRatis {
 
   @BeforeClass
   public static void init() throws Exception {
-    suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
+    suite = new RatisTestHelper.RatisTestSuite();
     conf = suite.getConf();
   }
 

+ 1 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java

@@ -57,7 +57,7 @@ public class TestKeysRatis {
 
   @BeforeClass
   public static void init() throws Exception {
-    suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
+    suite = new RatisTestHelper.RatisTestSuite();
     path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName());
     ozoneCluster = suite.getCluster();
     ozoneCluster.waitForClusterToBeReady();
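
Worth noting in passing: before this change TestKeysRatis handed TestBucketsRatis.class, not its own class, to the suite constructor. That apparent copy-paste slip was harmless only because the argument was never read; dropping the parameter removes the opportunity for it entirely.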

+ 0 - 13
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java

@@ -43,28 +43,15 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE;
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT;
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone

+ 0 - 2
hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java

@@ -42,7 +42,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
@@ -60,7 +59,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;

+ 0 - 4
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.scm.cli;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
-import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -31,7 +30,6 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
@@ -56,8 +54,6 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.HashSet;
-import java.util.Set;
 
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;