فهرست منبع

HDFS-741. TestHFlush test doesn't seek() past previously written part of the file. Contributed by Konstantin Boudnik

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@881014 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Boudnik 15 سال پیش
والد
کامیت
f08019b63b
2 فایل تغییر یافته به همراه 14 افزوده شده و 13 حذف شده
  1. 3 0
      CHANGES.txt
  2. 11 13
      src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java

+ 3 - 0
CHANGES.txt

@@ -58,6 +58,9 @@ Trunk (unreleased changes)
     
     HDFS-774. Intermittent race condition in TestFiPipelines (cos)
 
+    HDFS-741. TestHFlush test doesn't seek() past previously written part of
+    the file (cos, szetszwo)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 11 - 13
src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java

@@ -18,18 +18,17 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataInputStream;
 import static org.junit.Assert.assertEquals;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.junit.Assert.assertTrue;
 import org.junit.Test;
 
 import java.io.IOException;
 
 /** Class contains a set of tests to verify the correctness of 
- * newly introduced {@link DFSClient#hflush()} method */
+ * newly introduced {@link FSDataOutputStream#hflush()} method */
 public class TestHFlush {
   private final String fName = "hflushtest.dat";
   
@@ -115,9 +114,12 @@ public class TestHFlush {
         System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
         // Open the same file for read. Need to create new reader after every write operation(!)
         is = fileSystem.open(path);
-        is.read(toRead, tenth * i, tenth);
+        is.seek(tenth * i);
+        int readBytes = is.read(toRead, 0, tenth);
+        System.out.println("Has read " + readBytes);
+        assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
         is.close();
-        checkData(toRead, 0, expected, "Partial verification");
+        checkData(toRead, 0, readBytes, expected, "Partial verification");
       }
       System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding) + " section to file " + fileName);
       stm.write(fileContent, tenth * SECTIONS, rounding);
@@ -125,10 +127,6 @@ public class TestHFlush {
 
       assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
       AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
-
-    } catch (IOException ioe) {
-      ioe.printStackTrace();
-      throw ioe;
     } catch (Exception e) {
       e.printStackTrace();
     } finally {
@@ -136,9 +134,9 @@ public class TestHFlush {
       cluster.shutdown();
     }
   }
-  static void checkData(final byte[] actual, int from,
-                                final byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
+  static void checkData(final byte[] actual, int from, int len,
+                        final byte[] expected, String message) {
+    for (int idx = 0; idx < len; idx++) {
       assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                    expected[from+idx]+" actual "+actual[idx],
                    expected[from+idx], actual[idx]);