HDDS-1166. Fix checkstyle line length issues.
Contributed by Nandakumar.

Anu Engineer 6 years ago
parent
commit
014e17af78
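
The whole patch is mechanical: lines longer than the limit enforced by Hadoop's checkstyle configuration (presumably the 80-column LineLength check) are wrapped. A minimal sketch of the two wrapping patterns the patch applies, with hypothetical class and member names (only StandardCharsets is real):

    // Long static imports are broken at a dot, with the continuation line
    // indented four extra spaces, exactly as done throughout this patch.
    import static java.nio.charset.StandardCharsets
        .UTF_8;

    public final class LineWrapExample {

      private LineWrapExample() {
      }

      // Long declarations and calls are wrapped after the assignment
      // operator or the opening parenthesis, again with a four-space
      // continuation indent.
      public static byte[] encode(final String containerName,
          final String pipelineName) {
        final String message =
            "container=" + containerName + ", pipeline=" + pipelineName;
        return message.getBytes(UTF_8);
      }
    }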

+ 2 - 1
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java

@@ -86,7 +86,8 @@ public class CloseContainerCommandHandler implements CommandHandler {
         return;
       }
 
-      if (container.getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED) {
+      if (container.getContainerState() ==
+          ContainerProtos.ContainerDataProto.State.CLOSED) {
         // Closing a container is an idempotent operation.
         return;
       }
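
The guard above is what makes a repeated close command a no-op. A minimal standalone sketch of the same idempotent-close pattern; State and Container here are hypothetical stand-ins, not the Ozone types:

    // Hypothetical stand-ins for the Ozone container state machine.
    enum State { OPEN, CLOSING, CLOSED }

    final class Container {
      private State state = State.OPEN;

      State getState() {
        return state;
      }

      // Closing an already-closed container simply returns, so a replayed
      // or duplicated close command is harmless.
      void close() {
        if (state == State.CLOSED) {
          return;
        }
        state = State.CLOSED;
      }
    }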

+ 13 - 8
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java

@@ -220,17 +220,20 @@ public class TestCloseContainerCommandHandler {
       throws Exception {
     final OzoneConfiguration conf = new OzoneConfiguration();
     final DatanodeDetails datanodeDetails = randomDatanodeDetails();
-    final OzoneContainer ozoneContainer = getOzoneContainer(conf, datanodeDetails);
+    final OzoneContainer ozoneContainer = getOzoneContainer(
+        conf, datanodeDetails);
     ozoneContainer.start();
     try {
-      final Container container = createContainer(conf, datanodeDetails, ozoneContainer);
+      final Container container = createContainer(
+          conf, datanodeDetails, ozoneContainer);
       Mockito.verify(context.getParent(),
           Mockito.times(1)).triggerHeartbeat();
       final long containerId = container.getContainerData().getContainerID();
       final PipelineID pipelineId = PipelineID.valueOf(UUID.fromString(
           container.getContainerData().getOriginPipelineId()));
 
-      final CloseContainerCommandHandler closeHandler = new CloseContainerCommandHandler();
+      final CloseContainerCommandHandler closeHandler =
+          new CloseContainerCommandHandler();
       final CloseContainerCommand closeCommand = new CloseContainerCommand(
           containerId, pipelineId);
 
@@ -240,12 +243,14 @@ public class TestCloseContainerCommandHandler {
           ozoneContainer.getContainerSet().getContainer(containerId)
               .getContainerState());
 
-      // The container is closed, now we send close command with pipeline id which doesn't exist.
-      // This should cause the datanode to trigger quasi close, since the container is already
-      // closed, this should do nothing. The command should not fail either.
+      // The container is closed, now we send close command with
+      // pipeline id which doesn't exist.
+      // This should cause the datanode to trigger quasi close, since the
+      // container is already closed, this should do nothing.
+      // The command should not fail either.
       final PipelineID randomPipeline = PipelineID.randomId();
-      final CloseContainerCommand quasiCloseCommand = new CloseContainerCommand(
-          containerId, randomPipeline);
+      final CloseContainerCommand quasiCloseCommand =
+          new CloseContainerCommand(containerId, randomPipeline);
       closeHandler.handle(quasiCloseCommand, ozoneContainer, context, null);
 
       Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
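
The scenario above boils down to: once a container is CLOSED, a further close command, even one carrying an unknown pipeline id, must neither fail nor change the state. A minimal JUnit 4 sketch of that check, reusing the hypothetical Container and State from the earlier sketch:

    import org.junit.Assert;
    import org.junit.Test;

    public class ContainerIdempotencyTest {

      @Test
      public void closeIsIdempotent() {
        final Container container = new Container();
        container.close();
        // A second close, even one issued against a stale or unknown
        // pipeline, must neither throw nor move the container out of
        // the CLOSED state.
        container.close();
        Assert.assertEquals(State.CLOSED, container.getState());
      }
    }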

+ 12 - 7
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java

@@ -50,11 +50,16 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+    .INVALID_BLOCK_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
 
 /** Block Manager manages the block access for SCM. */
 public class BlockManagerImpl implements EventHandler<Boolean>,
@@ -83,8 +88,8 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
    * @param scm
    * @throws IOException
    */
-  public BlockManagerImpl(final Configuration conf, StorageContainerManager scm)
-      throws IOException {
+  public BlockManagerImpl(final Configuration conf,
+                          final StorageContainerManager scm) {
     Objects.requireNonNull(scm, "SCM cannot be null");
     this.pipelineManager = scm.getPipelineManager();
     this.containerManager = scm.getContainerManager();

+ 17 - 8
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java

@@ -31,10 +31,15 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+    .DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.command
+    .CommandStatusReportHandler.DeleteBlockStatus;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -50,8 +55,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static java.lang.Math.min;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
 
 /**
  * An implementation class of {@link DeletedBlockLog}, and it uses
@@ -328,12 +335,14 @@ public class DeletedBlockLogImpl
           ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
                scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
         while (iter.hasNext()) {
-          Table.KeyValue<Long, DeletedBlocksTransaction> keyValue = iter.next();
+          Table.KeyValue<Long, DeletedBlocksTransaction> keyValue =
+              iter.next();
           DeletedBlocksTransaction block = keyValue.getValue();
           if (block.getCount() > -1 && block.getCount() <= maxRetry) {
             if (transactions.addTransaction(block,
                 transactionToDNsCommitMap.get(block.getTxID()))) {
-              deleteTransactionMap.put(block.getContainerID(), block.getTxID());
+              deleteTransactionMap.put(block.getContainerID(),
+                  block.getTxID());
               transactionToDNsCommitMap
                   .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
             }
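
The loop above keeps only transactions whose retry count is still in range; judging from the condition, a count of -1 appears to take a transaction out of further retries. A minimal sketch of the same filter over a plain map, with Tx as a hypothetical stand-in for DeletedBlocksTransaction:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    final class TxScanner {

      static final class Tx {
        final long txId;
        final int count; // assumed: -1 marks a permanently failed transaction

        Tx(long txId, int count) {
          this.txId = txId;
          this.count = count;
        }
      }

      // Keep only transactions whose retry count is within [0, maxRetry].
      static List<Tx> eligible(Map<Long, Tx> table, int maxRetry) {
        final List<Tx> result = new ArrayList<>();
        for (Tx tx : table.values()) {
          if (tx.count > -1 && tx.count <= maxRetry) {
            result.add(tx);
          }
        }
        return result;
      }
    }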

+ 2 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java

@@ -22,7 +22,8 @@ package org.apache.hadoop.hdds.scm.metadata;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 import java.io.IOException;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.utils.db.Codec;
 
 /**

+ 8 - 5
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java

@@ -24,12 +24,14 @@ import java.security.cert.X509Certificate;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import java.io.IOException;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+    .CertificateStore;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.utils.db.DBStore;
 import org.apache.hadoop.utils.db.DBStoreBuilder;
 import org.apache.hadoop.utils.db.Table;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.utils.db.TableIterator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -75,7 +77,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
    * @param config - Ozone Configuration.
    * @throws IOException - on Failure.
    */
-  public SCMMetadataStoreRDBImpl(OzoneConfiguration config) throws IOException {
+  public SCMMetadataStoreRDBImpl(OzoneConfiguration config)
+      throws IOException {
     this.configuration = config;
     start(this.configuration);
     this.txID = new AtomicLong(this.getLargestRecordedTXID());
@@ -187,8 +190,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
   private void checkTableStatus(Table table, String name) throws IOException {
     String logMessage = "Unable to get a reference to %s table. Cannot " +
         "continue.";
-    String errMsg = "Inconsistent DB state, Table - %s. Please check the logs" +
-        "for more info.";
+    String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
+        " logs for more info.";
     if (table == null) {
       LOG.error(String.format(logMessage, name));
       throw new IOException(String.format(errMsg, name));
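
The errMsg change is a bug fix as much as a reflow: the original literals concatenated without a space between "logs" and "for". A minimal sketch of the difference:

    final class ErrMsgFix {
      // The original pair of literals rendered as
      // "...Please check the logsfor more info." because neither side
      // carried the separating space.
      static final String BROKEN = "Please check the logs" +
          "for more info.";
      // The patch moves the line break so the space survives.
      static final String FIXED = "Please check the" +
          " logs for more info.";
    }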

+ 2 - 1
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java

@@ -27,7 +27,8 @@ import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+    .CertificateServer;
 
 /**
  * This class acts as an SCM builder Class. This class is important for us

+ 10 - 5
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java

@@ -18,9 +18,12 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -28,7 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
+import org.apache.hadoop.hdds.scm.server
+    .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -54,7 +58,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client
+    .AuthenticationException;
 
 import java.io.File;
 import java.io.IOException;