HDFS-16075. Use empty array constants present in StorageType and DatanodeInfo to avoid creating redundant objects (#3115)

Reviewed-by: Hui Fei <ferhui@apache.org>
Viraj Jasani, 3 years ago
commit c488abbc79
19 changed files with 29 additions and 26 deletions
  1. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java (+1, -1)
  2. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java (+1, -1)
  3. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java (+1, -1)
  4. hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (+1, -1)
  5. hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java (+1, -1)
  6. hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java (+1, -1)
  7. hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java (+1, -1)
  8. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java (+1, -1)
  9. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (+1, -1)
  10. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java (+2, -2)
  11. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java (+3, -3)
  12. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (+1, -1)
  13. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java (+1, -1)
  14. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (+1, -1)
  15. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java (+6, -3)
  16. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java (+2, -2)
  17. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (+1, -1)
  18. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java (+2, -2)
  19. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (+1, -1)
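The change is mechanical but repeated across the codebase: every "new StorageType[0]" and "new DatanodeInfo[0]" becomes a reference to a shared constant. A minimal, self-contained sketch of the pattern follows; the real constants live in org.apache.hadoop.fs.StorageType and org.apache.hadoop.hdfs.protocol.DatanodeInfo, and the nested declaration here is a stand-in for illustration only:

class EmptyArrayPattern {

  static final class DatanodeInfo {
    // A zero-length array is immutable, so one shared instance can
    // safely replace every "new DatanodeInfo[0]" at every call site.
    static final DatanodeInfo[] EMPTY_ARRAY = {};
  }

  public static void main(String[] args) {
    // Before: each call site pays for its own (small) allocation.
    DatanodeInfo[] perCall = new DatanodeInfo[0];

    // After: every call site reuses the same constant object.
    DatanodeInfo[] shared = DatanodeInfo.EMPTY_ARRAY;

    System.out.println(perCall.length == shared.length); // true
    System.out.println(perCall == shared);                // false: the old style allocates
  }
}

The saving per call site is tiny; the point is simply that the allocation is redundant, since one immutable constant can serve every caller.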

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java

@@ -74,7 +74,7 @@ public class BlockLocation implements Serializable {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   /**
    * Default Constructor.

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java

@@ -27,7 +27,7 @@ public class TestBlockLocation {
 
   private static final String[] EMPTY_STR_ARRAY = new String[0];
   private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY =
-      new StorageType[0];
+      StorageType.EMPTY_ARRAY;
 
   private static void checkBlockLocation(final BlockLocation loc)
       throws Exception {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -1683,7 +1683,7 @@ class DataStreamer extends Daemon {
 
   DatanodeInfo[] getExcludedNodes() {
     return excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-            .keySet().toArray(new DatanodeInfo[0]);
+            .keySet().toArray(DatanodeInfo.EMPTY_ARRAY);
   }
 
   /**
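The getExcludedNodes() change above relies on the contract of Collection.toArray(T[]): if the collection fits in the array passed in, that same array is returned; otherwise a new, correctly sized array of the same runtime type is allocated. Passing the shared DatanodeInfo.EMPTY_ARRAY is therefore safe in both cases: a non-empty set never writes into the constant, and an empty set hands the constant straight back with no allocation at all. A small sketch of that behavior, with plain String arrays standing in for DatanodeInfo:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class ToArrayIdiom {
  static final String[] EMPTY = {};

  public static void main(String[] args) {
    List<String> none = new ArrayList<>();
    List<String> nodes = new ArrayList<>(Arrays.asList("dn0", "dn1"));

    // Empty collection: the passed-in array is returned as-is,
    // so the shared constant comes back and nothing is allocated.
    System.out.println(none.toArray(EMPTY) == EMPTY);  // true

    // Non-empty collection: a fresh, right-sized array is allocated,
    // so the shared constant is never mutated.
    String[] out = nodes.toArray(EMPTY);
    System.out.println(out == EMPTY);                  // false
    System.out.println(out.length);                    // 2
  }
}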

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java

@@ -131,7 +131,7 @@ public class LocatedBlocks {
   public int findBlock(long offset) {
     // create fake block of size 0 as a key
     LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
+        new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY);
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
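For context, findBlock() above locates the block covering a byte offset by building a "fake" LocatedBlock at that offset (created empty, then given length 1) and binary-searching with a comparator that treats overlapping ranges as equal. A simplified sketch of that lookup style; the Range type and names are stand-ins, not the real HDFS classes:

import java.util.Collections;
import java.util.Comparator;
import java.util.List;

class OffsetSearch {
  record Range(long start, long len) { }

  static int findBlock(List<Range> blocks, long offset) {
    Range key = new Range(offset, 1); // fake 1-byte range as the search key
    Comparator<Range> comp = (a, b) -> {
      long aEnd = a.start() + a.len();
      long bEnd = b.start() + b.len();
      if (aEnd <= b.start()) { return -1; } // a lies entirely before b
      if (bEnd <= a.start()) { return 1; }  // a lies entirely after b
      return 0;                             // ranges overlap: "found"
    };
    return Collections.binarySearch(blocks, key, comp);
  }

  public static void main(String[] args) {
    List<Range> blocks = List.of(new Range(0, 100), new Range(100, 100));
    System.out.println(findBlock(blocks, 150)); // 1: offset 150 falls in the second block
  }
}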

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java

@@ -535,7 +535,7 @@ public class MockNamenode {
    */
   private static LocatedBlock getMockLocatedBlock(final String nsId) {
     LocatedBlock lb = mock(LocatedBlock.class);
-    when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+    when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
     DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0",
         1111, 1112, 1113, 1114);
     DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -1241,7 +1241,7 @@ public class TestRouterRpc {
         newRouterFile, clientName, null, null,
         status.getFileId(), null, null);
 
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
+    DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
     LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
         newRouterFile, status.getFileId(), block.getBlock(),
         block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java

@@ -314,7 +314,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
       assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock);
 
       // Get additionalDatanode via router and block is not null.
-      DatanodeInfo[] exclusions = new DatanodeInfo[0];
+      DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
       LocatedBlock newBlock = clientProtocol.getAdditionalDatanode(
           testPath, status.getFileId(), blockTwo.getBlock(),
           blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java

@@ -209,7 +209,7 @@ final class FSDirAppendOp {
       BlockInfo lastBlock = file.getLastBlock();
       if (lastBlock != null) {
         ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
-        ret = new LocatedBlock(blk, new DatanodeInfo[0]);
+        ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY);
       }
     }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -883,7 +883,7 @@ public class TestDFSClientRetries {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
 
     ClientDatanodeProtocol proxy = null;
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java

@@ -242,7 +242,7 @@ public class TestDFSInputStream {
       DFSInputStream dfsInputStream =
           (DFSInputStream) fs.open(filePath).getWrappedStream();
       LocatedBlock lb = mock(LocatedBlock.class);
-      when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+      when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
       DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
           1112, 1113, 1114);
       DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);
@@ -271,7 +271,7 @@ public class TestDFSInputStream {
       DFSInputStream dfsInputStream =
               (DFSInputStream) fs.open(filePath).getWrappedStream();
       LocatedBlock lb = mock(LocatedBlock.class);
-      when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]);
+      when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY);
       DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111,
               1112, 1113, 1114);
       DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -461,8 +461,8 @@ public class TestLeaseRecovery {
 
       // Add a block to the file
       ExtendedBlock block = client.getNamenode().addBlock(
-          file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(),
-          new String[0], null).getBlock();
+          file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY,
+          stat.getFileId(), new String[0], null).getBlock();
 
       // update the pipeline to get a new genstamp.
       ExtendedBlock updatedBlock = client.getNamenode()
@@ -578,7 +578,7 @@ public class TestLeaseRecovery {
     // Add a block to the file
     LocatedBlock blk = client.getNamenode()
         .addBlock(file, client.clientName, null,
-            new DatanodeInfo[0], stat.getFileId(), new String[0], null);
+            DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null);
     ExtendedBlock finalBlock = blk.getBlock();
     if (bytesToWrite != null) {
       // Here we create a output stream and then abort it so the block gets

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java

@@ -65,7 +65,7 @@ public class TestReplaceDatanodeOnFailure {
 
     final DatanodeInfo[] infos = new DatanodeInfo[5];
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
-    datanodes[0] = new DatanodeInfo[0];
+    datanodes[0] = DatanodeInfo.EMPTY_ARRAY;
     for(int i = 0; i < infos.length; ) {
       infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
       i++;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java

@@ -31,7 +31,7 @@ public class TestLocatedBlock {
 
   @Test(timeout = 10000)
   public void testAddCachedLocWhenEmpty() {
-    DatanodeInfo[] ds = new DatanodeInfo[0];
+    DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
     ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
     LocatedBlock l1 = new LocatedBlock(b1, ds);
     DatanodeDescriptor dn = new DatanodeDescriptor(

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -391,7 +391,7 @@ public class TestBlockToken {
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
-    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
+    LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY);
     fakeBlock.setBlockToken(token);
 
     // Create another RPC proxy with the same configuration - this will never

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -392,7 +392,8 @@ public class TestDatanodeManager {
       storageTypesList.add(StorageType.PROVIDED);
     }
 
-    StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // register new datanode
@@ -694,7 +695,8 @@ public class TestDatanodeManager {
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK,
             StorageType.SSD, StorageType.DEFAULT, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.
@@ -779,7 +781,8 @@ public class TestDatanodeManager {
     List<StorageType> storageTypesList =
         new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK,
             StorageType.DEFAULT, StorageType.SSD, StorageType.SSD));
-    StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]);
+    StorageType[] storageTypes = storageTypesList.toArray(
+        StorageType.EMPTY_ARRAY);
 
     for (int i = 0; i < totalDNs; i++) {
       // Register new datanode.

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java

@@ -127,8 +127,8 @@ public class TestDataXceiverLazyPersistHint {
         StorageType.RAM_DISK,
         null,
         "Dummy-Client",
-        new DatanodeInfo[0],
-        new StorageType[0],
+        DatanodeInfo.EMPTY_ARRAY,
+        StorageType.EMPTY_ARRAY,
         mock(DatanodeInfo.class),
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
         0, 0, 0, 0,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -165,7 +165,7 @@ public class TestDiskError {
         DataChecksum.Type.CRC32, 512);
     new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], new StorageType[0], null,
+        DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
         checksum, CachingStrategy.newDefaultStrategy(), false, false,
         null, null, new String[0]);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -119,7 +119,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
         LocatedBlock additionalLocatedBlock =
             nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
                 locatedBlock.getBlock(), locatedBlock.getLocations(),
-                locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+                locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY,
                 additionalReplication, clientMachine);
         doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
       }
@@ -159,7 +159,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
           LocatedBlock additionalLocatedBlock =
               nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
                   locatedBlock.getBlock(), partLocs,
-                  partStorageIDs, new DatanodeInfo[0],
+                  partStorageIDs, DatanodeInfo.EMPTY_ARRAY,
                   j, clientMachine);
           doTestLocatedBlock(i + j, additionalLocatedBlock);
         }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java

@@ -859,7 +859,7 @@ public class TestHASafeMode {
           pathString,
           client.getClientName(),
           new ExtendedBlock(previousBlock),
-          new DatanodeInfo[0],
+          DatanodeInfo.EMPTY_ARRAY,
           DFSClientAdapter.getFileId((DFSOutputStream) create
               .getWrappedStream()), null, null);
       cluster.restartNameNode(0, true);