
HDFS-14029. Sleep in TestLazyPersistFiles should be put into a loop. Contributed by Adam Antal.

Arpit Agarwal, 6 years ago
commit 9b899f1ebd

+7 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java

@@ -151,7 +151,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   /**
    * If NN restarted then lazyPersist files should not deleted
    */
-  @Test
+  @Test(timeout = 20000)
   public void testFileShouldNotDiscardedIfNNRestarted()
       throws IOException, InterruptedException, TimeoutException {
     getClusterBuilder().setRamDiskReplicaCapacity(2).build();
@@ -165,13 +165,12 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
     cluster.restartNameNodes();

     // wait for the redundancy monitor to mark the file as corrupt.
-    Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
-
-    Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
-        .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
-
-    // Check block detected as corrupted
-    assertThat(corruptBlkCount, is(1L));
+    Long corruptBlkCount;
+    do {
+      Thread.sleep(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
+      corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
+          .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
+    } while (corruptBlkCount != 1L);
 
     // Ensure path1 exist.
     Assert.assertTrue(fs.exists(path1));
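
For context, the change replaces a single fixed sleep of 2 × DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT seconds with a do/while loop that polls the corrupt-replica count once per monitor interval, bounded by the new @Test(timeout = 20000) annotation. A minimal, self-contained sketch of that poll-until-condition pattern is below; the class and method names (PollUntil, waitUntil) and the explicit deadline parameter are illustrative only and do not appear in the commit, which relies on JUnit's timeout instead.

```java
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public class PollUntil {
  /**
   * Polls {@code condition} every {@code intervalMs} milliseconds until it
   * holds, or throws TimeoutException once {@code deadlineMs} has elapsed.
   * Sketch of the sleep-in-a-loop pattern; in the actual test the overall
   * bound comes from the JUnit {@code @Test(timeout = 20000)} annotation.
   */
  static void waitUntil(Supplier<Boolean> condition, long intervalMs, long deadlineMs)
      throws InterruptedException, TimeoutException {
    long end = System.currentTimeMillis() + deadlineMs;
    while (!condition.get()) {
      if (System.currentTimeMillis() > end) {
        throw new TimeoutException("condition not met within " + deadlineMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Stand-in for "the redundancy monitor has marked exactly one block corrupt".
    waitUntil(() -> System.currentTimeMillis() - start > 2000, 500, 20000);
    System.out.println("condition reached");
  }
}
```

The design point is the same as in the test: polling at the monitor's own interval makes the wait as short as the system allows, while the outer timeout (JUnit's in the commit, an explicit deadline here) keeps the test from hanging if the condition never becomes true.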