HDFS-14101. Fixing underflow error in test. Contributed by Zsolt Venczel.

Sean Mackrory, 6 years ago
parent
commit
80e59e7876

+14 -10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
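
The underflow being fixed is in the corruption offset: both tests overwrite the last 2 bytes of a generated block meta file at position channel.size() - 2, but without a minimum file size DFSTestUtil could generate a file shorter than 2 bytes, making that position negative. A minimal standalone sketch of the failure mode, assuming a 1-byte file (the class name and temporary file below are illustrative, not part of the patch):

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class CorruptionOffsetSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative stand-in for a generated meta file that is only 1 byte long.
    File tiny = File.createTempFile("tiny", ".meta");
    try (RandomAccessFile file = new RandomAccessFile(tiny, "rw")) {
      file.write(0x42);                                   // file length is now 1
      FileChannel channel = file.getChannel();

      final int corruptionLength = 2;
      long position = channel.size() - corruptionLength;  // 1 - 2 = -1: the underflow
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer);

      // FileChannel.write(ByteBuffer, long) rejects a negative position with
      // IllegalArgumentException, so the test fails here instead of corrupting
      // the replica as intended.
      channel.write(ByteBuffer.wrap(buffer), position);
    }
  }
}

Setting setMinSize(corruptionLength) on the DFSTestUtil.Builder, as the patch below does, guarantees every generated file is at least 2 bytes long, so channel.size() - corruptionLength can no longer go negative in either test.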

@@ -80,10 +80,13 @@ public class TestListCorruptFileBlocks {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -104,14 +107,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
       new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
@@ -160,10 +162,13 @@ public class TestListCorruptFileBlocks {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -183,14 +188,13 @@ public class TestListCorruptFileBlocks {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
       new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {