HDFS-11122. TestDFSAdmin#testReportCommand fails due to timeout. Contributed by Yiqun Lin

(cherry picked from commit aa6010ccca3045ce9f0bb819fb2cb7ff65e1822b)
Mingliang Liu 8 years ago
parent commit ae7a20f524
1 changed file with 16 additions and 3 deletions
  1. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java (+16 -3)

+16 -3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -25,6 +25,7 @@ import com.google.common.collect.Lists;
 import org.apache.commons.lang.text.StrBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationUtil;
+import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -39,6 +40,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
@@ -61,6 +64,7 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -376,7 +380,7 @@ public class TestDFSAdmin {
     return sb.toString();
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 120000)
   public void testReportCommand() throws Exception {
     redirectStream();
 
@@ -434,6 +438,14 @@ public class TestDFSAdmin {
       assertEquals("Fail to corrupt all replicas for block " + block,
           replFactor, blockFilesCorrupted);
 
+      try {
+        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
+            conf, true);
+        fail("Should have failed to read the file with corrupted blocks.");
+      } catch (ChecksumException ignored) {
+        // expected exception reading corrupt blocks
+      }
+
       /*
        * Increase replication factor, this should invoke transfer request.
        * Receiving datanode fails on checksum and reports it to namenode
@@ -446,6 +458,7 @@ public class TestDFSAdmin {
         public Boolean get() {
           LocatedBlocks blocks = null;
           try {
+            miniCluster.triggerBlockReports();
             blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
                 Long.MAX_VALUE);
           } catch (IOException e) {
@@ -453,7 +466,7 @@ public class TestDFSAdmin {
           }
           return blocks != null && blocks.get(0).isCorrupt();
         }
-      }, 100, 60000);
+      }, 1000, 60000);
 
       BlockManagerTestUtil.updateState(
           miniCluster.getNameNode().getNamesystem().getBlockManager());
@@ -496,4 +509,4 @@ public class TestDFSAdmin {
         client.getDatanodeStorageReport(DatanodeReportType.DEAD).length);
     assertEquals(numCorruptBlocks, client.getCorruptBlocksCount());
   }
-}
+}
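
For reference, below is a minimal, self-contained sketch (not the committed test) of the pattern this patch relies on: corrupt every replica of a block, read the file so the corruption is actually detected and reported (the read is expected to fail with a ChecksumException), then poll the NameNode with forced block reports at a 1-second interval until the block is marked corrupt, rather than waiting on the DataNodes' own block-report schedule. The class name, cluster size, and file path are illustrative assumptions.

import java.io.IOException;

import com.google.common.base.Supplier;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

import static org.junit.Assert.fail;

// Hypothetical test class, for illustration only.
public class CorruptBlockReportSketch {

  @Test(timeout = 120000)
  public void waitForCorruptBlockReport() throws Exception {
    Configuration conf = new Configuration();
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      final DFSClient client = fs.getClient();
      final Path file = new Path("/corrupt-block-test");
      final short replFactor = 1;

      DFSTestUtil.createFile(fs, file, 1024, replFactor, 0L);
      DFSTestUtil.waitReplication(fs, file, replFactor);

      // Corrupt every on-disk replica of the file's first block.
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
      cluster.corruptBlockOnDataNodes(block);

      // Reading the file now hits the corrupted replica; the expected
      // ChecksumException is what surfaces the corruption to HDFS.
      try {
        IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
            conf, true);
        fail("Should have failed to read the file with corrupted blocks.");
      } catch (ChecksumException ignored) {
        // expected: all replicas were corrupted above
      }

      // Poll once per second (up to 60 seconds), forcing block reports on
      // each check, until the NameNode marks the block as corrupt.
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          try {
            cluster.triggerBlockReports();
            LocatedBlocks blocks = client.getNamenode()
                .getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
            return blocks != null && blocks.get(0).isCorrupt();
          } catch (IOException e) {
            return false;
          }
        }
      }, 1000, 60000);
    } finally {
      cluster.shutdown();
    }
  }
}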