
HDFS-530. Refactor TestFileAppend* to remove code duplication. Contributed by Konstantin Boudnik

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@801736 13f79535-47bb-0310-9956-ffa450edef68
Committed by Tsz-wo Sze (parent commit 474552fb26)

+ 3 - 0
CHANGES.txt

@@ -86,6 +86,9 @@ Trunk (unreleased changes)
     HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches
     in BlockManager. (shv)
 
+    HDFS-530. Refactor TestFileAppend* to remove code duplication.
+    (Konstantin Boudnik via szetszwo)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of 

+ 52 - 4
src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -23,13 +23,12 @@ import java.io.OutputStream;
 import java.util.Random;
 
 import junit.framework.TestCase;
+import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -61,7 +60,11 @@ class AppendTestUtil {
       return r;
     }
   };
-  
+  static final int BLOCK_SIZE = 1024;
+  static final int NUM_BLOCKS = 10;
+  static final int FILE_SIZE = NUM_BLOCKS * BLOCK_SIZE + 1;
+  static long seed = -1;
+
   static int nextInt() {return RANDOM.get().nextInt();}
   static int nextInt(int n) {return RANDOM.get().nextInt(n);}
  static long nextLong() {return RANDOM.get().nextLong();}
@@ -116,4 +119,49 @@ class AppendTestUtil {
       throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
     }
   }
+
+  /**
+   *  Create a buffer that contains the entire test file data.
+   */
+  static byte[] initBuffer(int size) {
+    if (seed == -1)
+      seed = nextLong();
+    return randomBytes(seed, size);
+  }
+
+  /**
+   *  Creates a file but does not close it.
+   *  Make sure to call close() on the returned stream.
+   *  @throws IOException an exception might be thrown
+   */
+  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+      throws IOException {
+    return fileSys.create(name, true,
+        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        (short) repl, (long) BLOCK_SIZE);
+  }
+
+  /**
+   *  Compare the content of the file at the given Path with the
+   *  specified byte[] buffer's content.
+   *  @throws IOException an exception might be thrown
+   */
+  static void checkFullFile(FileSystem fs, Path name, int len,
+                            final byte[] compareContent, String message) throws IOException {
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[len];
+    stm.readFully(0, actual);
+    checkData(actual, 0, compareContent, message);
+    stm.close();
+  }
+
+  private static void checkData(final byte[] actual, int from,
+                                final byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                   expected[from+idx]+" actual "+actual[idx],
+                   expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
 }
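
Taken together, the new AppendTestUtil members replace the per-class copies that the following diffs delete. A minimal sketch of the resulting write-then-verify idiom (fs and p are placeholders for a test's MiniDFSCluster file system and path, not part of this commit):

    // Hypothetical usage of the shared helpers from a TestFileAppend* case.
    byte[] contents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, 1); // repl = 1
    stm.write(contents);
    stm.close();
    AppendTestUtil.checkFullFile(fs, p, AppendTestUtil.FILE_SIZE,
        contents, "sanity check"); // byte-for-byte comparison with the buffer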

+ 32 - 73
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -26,7 +26,6 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,38 +42,15 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
  * support HDFS appends.
  */
 public class TestFileAppend extends TestCase {
-  static final int blockSize = 1024;
-  static final int numBlocks = 10;
-  static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
-  private long seed;
-  private byte[] fileContents = null;
-
-  //
-  // create a buffer that contains the entire test file data.
-  //
-  private void initBuffer(int size) {
-    seed = AppendTestUtil.nextLong();
-    fileContents = AppendTestUtil.randomBytes(seed, size);
-  }
-
-  /*
-   * creates a file but does not close it
-   */ 
-  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
-                                            (short)repl, (long)blockSize);
-    return stm;
-  }
+  private static byte[] fileContents = null;
 
   //
   // writes to file but does not close it
   //
   private void writeFile(FSDataOutputStream stm) throws IOException {
-    byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
+    byte[] buffer = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     stm.write(buffer);
   }
 
@@ -89,16 +65,16 @@ public class TestFileAppend extends TestCase {
     while (!done) {
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException e) {}
+      } catch (InterruptedException e) {;}
       done = true;
       BlockLocation[] locations = fileSys.getFileBlockLocations(
-          fileSys.getFileStatus(name), 0, fileSize);
-      if (locations.length < numBlocks) {
+          fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
+      if (locations.length < AppendTestUtil.NUM_BLOCKS) {
         System.out.println("Number of blocks found " + locations.length);
         done = false;
         continue;
       }
-      for (int idx = 0; idx < numBlocks; idx++) {
+      for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
         if (locations[idx].getHosts().length < repl) {
           System.out.println("Block index " + idx + " not yet replciated.");
           done = false;
@@ -106,43 +82,24 @@ public class TestFileAppend extends TestCase {
         }
       }
     }
-    FSDataInputStream stm = fileSys.open(name);
-    byte[] expected = new byte[numBlocks * blockSize];
+    byte[] expected = 
+        new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
     if (simulatedStorage) {
       for (int i= 0; i < expected.length; i++) {  
         expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
       }
     } else {
-      for (int i= 0; i < expected.length; i++) {  
-        expected[i] = fileContents[i];
-      }
+      System.arraycopy(fileContents, 0, expected, 0, expected.length);
     }
     // do a sanity check. Read the file
-    byte[] actual = new byte[numBlocks * blockSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, expected, "Read 1");
-  }
-
-  private void checkFullFile(FileSystem fs, Path name) throws IOException {
-    FSDataInputStream stm = fs.open(name);
-    byte[] actual = new byte[fileSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, fileContents, "Read 2");
-    stm.close();
+    AppendTestUtil.checkFullFile(fileSys, name,
+        AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
+        expected, "Read 1");
   }
 
-  private void checkData(byte[] actual, int from, byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
-      actual[idx] = 0;
-    }
-  }
-
-
   /**
    * Test that copy on write for blocks works correctly
+   * @throws IOException an exception might be thrown
    */
   public void testCopyOnWrite() throws IOException {
     Configuration conf = new Configuration();
@@ -159,7 +116,7 @@ public class TestFileAppend extends TestCase {
       // create a new file, write to it and close it.
       //
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       writeFile(stm);
       stm.close();
 
@@ -178,11 +135,9 @@ public class TestFileAppend extends TestCase {
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         Block b = blocks.get(i).getBlock();
-        FSDataset fsd = dataset;
-        File f = fsd.getFile(b);
+        File f = dataset.getFile(b);
         File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f + 
-                           " to " + link);
+        System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
       }
 
@@ -193,7 +148,7 @@ public class TestFileAppend extends TestCase {
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-                   dataset.detachBlock(b, 1) == true);
+            dataset.detachBlock(b, 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -203,7 +158,7 @@ public class TestFileAppend extends TestCase {
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-                   dataset.detachBlock(b, 1) == false);
+            !dataset.detachBlock(b, 1));
       }
 
     } finally {
@@ -214,30 +169,31 @@ public class TestFileAppend extends TestCase {
 
   /**
    * Test a simple flush on a simple HDFS file.
+   * @throws IOException an exception might be thrown
    */
   public void testSimpleFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/simpleFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file simpleFlush.dat");
 
       // write to file
-      int mid = fileSize/2;
+      int mid = AppendTestUtil.FILE_SIZE / 2;
       stm.write(fileContents, 0, mid);
       stm.sync();
       System.out.println("Wrote and Flushed first part of file.");
 
       // write the remainder of the file
-      stm.write(fileContents, mid, fileSize - mid);
+      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
       System.out.println("Written second part of file");
       stm.sync();
       stm.sync();
@@ -250,7 +206,8 @@ public class TestFileAppend extends TestCase {
       System.out.println("Closed file.");
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
 
     } catch (IOException e) {
       System.out.println("Exception :" + e);
@@ -267,36 +224,38 @@ public class TestFileAppend extends TestCase {
 
   /**
    * Test that file data can be flushed.
+   * @throws IOException an exception might be thrown
    */
   public void testComplexFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/complexFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file complexFlush.dat");
 
       int start = 0;
-      for (start = 0; (start + 29) < fileSize; ) {
+      for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
         stm.write(fileContents, start, 29);
         stm.sync();
         start += 29;
       }
-      stm.write(fileContents, start, fileSize-start);
+      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);
 
       // verify that full blocks are sane
       checkFile(fs, file1, 1);
       stm.close();
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
     } catch (IOException e) {
       System.out.println("Exception :" + e);
       throw e; 
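
One subtlety makes the writeFile() change above safe: initBuffer caches its random seed in AppendTestUtil.seed on first use, so later calls reproduce the same byte sequence rather than fresh random data. A sketch of the invariant the refactored tests rely on (assuming nothing resets the cached seed between calls):

    byte[] first = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    byte[] second = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    // Both arrays come from the same cached seed, so the data writeFile()
    // pushes is exactly what checkFullFile() later expects to read back.
    Assert.assertTrue(java.util.Arrays.equals(first, second));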

+ 19 - 58
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -24,7 +24,6 @@ import java.util.Arrays;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,9 +55,7 @@ public class TestFileAppend2 extends TestCase {
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  static final int blockSize = 1024;
   static final int numBlocks = 5;
-  static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
@@ -73,54 +70,14 @@ public class TestFileAppend2 extends TestCase {
   int numAppendsPerThread = 2000;
 ****/
   Workload[] workload = null;
-  ArrayList<Path> testFiles = new ArrayList<Path>();
+  final ArrayList<Path> testFiles = new ArrayList<Path>();
   volatile static boolean globalStatus = true;
 
-  //
-  // create a buffer that contains the entire test file data.
-  //
-  private void initBuffer(int size) {
-    long seed = AppendTestUtil.nextLong();
-    fileContents = AppendTestUtil.randomBytes(seed, size);
-  }
-
-  /*
-   * creates a file but does not close it
-   */ 
-  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
-                                            (short)repl, (long)blockSize);
-    return stm;
-  }
-
-  private void checkFile(FileSystem fs, Path name, int len) throws IOException {
-    FSDataInputStream stm = fs.open(name);
-    byte[] actual = new byte[len];
-    stm.readFully(0, actual);
-    checkData(actual, 0, fileContents, "Read 2");
-    stm.close();
-  }
-
-  private void checkFullFile(FileSystem fs, Path name) throws IOException {
-    checkFile(fs, name, fileSize);
-  }
-
-  private void checkData(byte[] actual, int from, byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
-      actual[idx] = 0;
-    }
-  }
-
-
   /**
    * Creates one file, writes a few bytes to it, and then closes it.
    * Reopens the same file for appending, writes all blocks, and then closes it.
    * Verifies that all the data exists in the file.
+   * @throws IOException an exception might be thrown
    */ 
   public void testSimpleAppend() throws IOException {
     Configuration conf = new Configuration();
@@ -129,7 +86,7 @@ public class TestFileAppend2 extends TestCase {
     }
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
@@ -137,7 +94,7 @@ public class TestFileAppend2 extends TestCase {
 
         // create a new file.
         Path file1 = new Path("/simpleAppend.dat");
-        FSDataOutputStream stm = createFile(fs, file1, 1);
+        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
         System.out.println("Created file simpleAppend.dat");
   
         // write to file
@@ -161,14 +118,16 @@ public class TestFileAppend2 extends TestCase {
         // ensure getPos is set to reflect existing size of the file
         assertTrue(stm.getPos() > 0);
 
-        System.out.println("Writing " + (fileSize - mid2) + " bytes to file " + file1);
-        stm.write(fileContents, mid2, fileSize - mid2);
+        System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) +
+            " bytes to file " + file1);
+        stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
         System.out.println("Written second part of file");
         stm.close();
         System.out.println("Wrote and Closed second part of file.");
   
         // verify that entire file is good
-        checkFullFile(fs, file1);
+        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+            fileContents, "Read 2");
       }
 
      { // test appending to a non-existing file.
@@ -285,7 +244,7 @@ public class TestFileAppend2 extends TestCase {
       for (int i = 0; i < numAppendsPerThread; i++) {
    
         // pick a file at random and remove it from pool
-        Path testfile = null;
+        Path testfile;
         synchronized (testFiles) {
           if (testFiles.size() == 0) {
             System.out.println("Completed write to almost all files.");
@@ -304,7 +263,7 @@ public class TestFileAppend2 extends TestCase {
           len = fs.getFileStatus(testfile).getLen();
 
           // if file is already full, then pick another file
-          if (len >= fileSize) {
+          if (len >= AppendTestUtil.FILE_SIZE) {
             System.out.println("File " + testfile + " is full.");
             continue;
           }
@@ -312,7 +271,7 @@ public class TestFileAppend2 extends TestCase {
           // do small size appends so that we can trigger multiple
           // appends to the same file.
           //
-          int left = (int)(fileSize - len)/3;
+          int left = (int)(AppendTestUtil.FILE_SIZE - len)/3;
           if (left <= 0) {
             left = 1;
           }
@@ -335,8 +294,7 @@ public class TestFileAppend2 extends TestCase {
                                  " expected size " + (len + sizeToAppend) +
                                  " waiting for namenode metadata update.");
               Thread.sleep(5000);
-            } catch (InterruptedException e) { 
-            }
+            } catch (InterruptedException e) {;}
           }
 
           assertTrue("File " + testfile + " size is " + 
@@ -344,7 +302,8 @@ public class TestFileAppend2 extends TestCase {
                      " but expected " + (len + sizeToAppend),
                     fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
 
-          checkFile(fs, testfile, (int)(len + sizeToAppend));
+          AppendTestUtil.checkFullFile(fs, testfile, (int)(len + sizeToAppend),
+              fileContents, "Read 2");
         } catch (Throwable e) {
           globalStatus = false;
           if (e != null && e.toString() != null) {
@@ -368,9 +327,10 @@ public class TestFileAppend2 extends TestCase {
 
   /**
    * Tests appends to files at random offsets.
+   * @throws IOException an exception might be thrown
    */
   public void testComplexAppend() throws IOException {
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
@@ -392,7 +352,8 @@ public class TestFileAppend2 extends TestCase {
       for (int i = 0; i < numberOfFiles; i++) {
         short replication = (short)(AppendTestUtil.nextInt(numDatanodes) + 1);
         Path testFile = new Path("/" + i + ".dat");
-        FSDataOutputStream stm = createFile(fs, testFile, replication);
+        FSDataOutputStream stm =
+            AppendTestUtil.createFile(fs, testFile, replication);
         stm.close();
         testFiles.add(testFile);
       }
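
The Workload hunks keep the poll-until-visible idiom: after an append, a thread spins until the namenode reports the expected file length, then verifies the content through the shared helper. A condensed sketch of that loop (expectedLen, testfile, and fileContents stand in for the test's locals and fields):

    long expectedLen = len + sizeToAppend;
    while (fs.getFileStatus(testfile).getLen() != expectedLen) {
      try {
        Thread.sleep(5000); // give the namenode time to update its metadata
      } catch (InterruptedException ignored) {}
    }
    AppendTestUtil.checkFullFile(fs, testfile, (int) expectedLen,
        fileContents, "Read 2");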

+ 24 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -66,7 +66,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     };  
   }
 
-  /** TC1: Append on block boundary. */
+  /**
+   * TC1: Append on block boundary.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC1() throws Exception {
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
@@ -91,7 +94,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC2: Append on non-block boundary. */
+  /**
+   * TC2: Append on non-block boundary.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC2() throws Exception {
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
@@ -116,7 +122,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC5: Only one simultaneous append. */
+  /**
+   * TC5: Only one simultaneous append.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC5() throws Exception {
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
@@ -143,7 +152,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     out.close();        
   }
 
-  /** TC7: Corrupted replicas are present. */
+  /**
+   * TC7: Corrupted replicas are present.
+   * @throws IOException an exception might be thrown
+   */
   public void testTC7() throws Exception {
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
@@ -188,7 +200,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     AppendTestUtil.check(fs, p, len1 + len2);
   }
 
-  /** TC11: Racing rename */
+  /**
+   * TC11: Racing rename
+   * @throws IOException an exception might be thrown
+   */
   public void testTC11() throws Exception {
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
@@ -241,7 +256,10 @@ public class TestFileAppend3 extends junit.framework.TestCase {
     }
   }
 
-  /** TC12: Append to partial CRC chunk */
+  /** 
+   * TC12: Append to partial CRC chunk
+   * @throws IOException an exception might be thrown
+   */
   public void testTC12() throws Exception {
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);