Browse Source

HDFS-7171. Fix Jenkins failures in HDFS-6581 branch. (Arpit Agarwal)

arp 10 years ago
parent
commit
a45ad330fa

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt

@@ -92,4 +92,5 @@
     HDFS-7129. Metrics to track usage of memory for writes. (Xiaoyu Yao
     via Arpit Agarwal)
 
 
+    HDFS-7171. Fix Jenkins failures in HDFS-6581 branch. (Arpit Agarwal)
 
 

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2340,7 +2340,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }
 
      src = FSDirectory.resolvePath(src, pathComponents, dir);
-      INode inode = dir.getINode(src);
 
 
      // get the corresponding policy and make sure the policy name is valid
      BlockStoragePolicy policy = blockManager.getStoragePolicy(policyName);

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -69,7 +69,7 @@ public class TestBlockStoragePolicy {
   static final byte COLD = (byte) 4;
   static final byte WARM = (byte) 8;
   static final byte HOT  = (byte) 12;
-
+  static final byte LAZY_PERSIST  = (byte) 15;
 
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -116,6 +116,9 @@ public class TestBlockStoragePolicy {
     expectedPolicyStrings.put(HOT,
         "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
             "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
+    expectedPolicyStrings.put(LAZY_PERSIST,
+        "BlockStoragePolicy{LAZY_PERSIST:15, storageTypes=[RAM_DISK, DISK], " +
+            "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
 
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i); 
@@ -1141,7 +1144,7 @@ public class TestBlockStoragePolicy {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(3, policies.length);
+      Assert.assertEquals(4, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),

+ 0 - 52
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java

@@ -793,56 +793,4 @@ public class TestStorageMover {
       test.shutdownCluster();
     }
   }
-
-  /**
-   * Test blocks of lazy_persist file on RAM_DISK will not be moved to other
-   * storage types by the Storage Mover.
-   */
-  @Test
-  public void testRamDiskNotMoved() throws Exception {
-    LOG.info("testRamDiskNotMoved");
-    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
-    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
-
-    final long diskCapacity = 100 * BLOCK_SIZE;
-    final long archiveCapacity = (6 + HdfsConstants.MIN_BLOCKS_FOR_WRITE)
-      * BLOCK_SIZE;
-    final long ramDiskCapacity = 10 * BLOCK_SIZE;
-    final long[][] capacities = genCapacities(1, 0, 0, 1,
-      diskCapacity, archiveCapacity, ramDiskCapacity);
-    final int LAZY_WRITER_INTERVAL_SEC = 1;
-    final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
-      1, (short)1, genStorageTypes(1, 0, 0, 1), capacities);
-    clusterScheme.conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
-      LAZY_WRITER_INTERVAL_SEC);
-    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
-
-    try {
-      test.runBasicTest(false);
-
-      // test creating a hot RAM_DISK file
-      final int SEED = 0xFADED;
-      final Path foo_hot = new Path(pathPolicyMap.hot, "foo_hot");
-      DFSTestUtil.createFile(test.dfs, foo_hot, true, BLOCK_SIZE, BLOCK_SIZE,
-        BLOCK_SIZE, (short) 1, SEED, true);
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-        test.dfs.getClient(), foo_hot, StorageType.RAM_DISK));
-
-     // Sleep for a short time to allow the lazy writer thread to do its job
-      Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
-
-      // Verify policy related name change is allowed
-      final Path foo_hot_new = new Path(pathPolicyMap.warm, "foo_hot");
-      test.dfs.rename(foo_hot, pathPolicyMap.warm);
-      Assert.assertTrue(test.dfs.exists(foo_hot_new));
-
-      // Verify blocks on ram disk will not be moved to other storage types by
-      // policy based Storage Mover.
-      test.migrate();
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-        test.dfs.getClient(), foo_hot_new, StorageType.RAM_DISK));
-    } finally {
-      test.shutdownCluster();
-    }
-  }
 }