Browse code

HDFS-1252. Fix TestDFSConcurrentFileOperations. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security@1167430 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 14 years ago
parent
commit
677d6c8a18

+ 3 - 0
CHANGES.txt

@@ -122,6 +122,9 @@ Release 0.20.205.0 - unreleased
     HDFS-1186. DNs should interrupt writers at start of recovery.
     (Todd Lipcon via suresh)
 
+    HDFS-1252. Fix TestDFSConcurrentFileOperations.
+    (Todd Lipcon via suresh).
+
   IMPROVEMENTS
 
     MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via

+ 3 - 3
src/test/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** Utilities for append-related tests */ 
-class AppendTestUtil {
+public class AppendTestUtil {
   /** For specifying the random number generator seed,
    *  change the following value:
    */
@@ -95,7 +95,7 @@ class AppendTestUtil {
     return DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 
-  static void write(OutputStream out, int offset, int length) throws IOException {
+  public static void write(OutputStream out, int offset, int length) throws IOException {
     final byte[] bytes = new byte[length];
     for(int i = 0; i < length; i++) {
       bytes[i] = (byte)(offset + i);
@@ -103,7 +103,7 @@ class AppendTestUtil {
     out.write(bytes);
   }
   
-  static void check(FileSystem fs, Path p, long length) throws IOException {
+  public static void check(FileSystem fs, Path p, long length) throws IOException {
     int i = -1;
     try {
       final FileStatus status = fs.getFileStatus(p);

+ 7 - 17
src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -73,6 +74,7 @@ public class TestDFSConcurrentFileOperations extends TestCase {
     Configuration conf = new Configuration();
     
     conf.setLong("dfs.block.size", blockSize);
+    conf.setBoolean("dfs.support.append", true);
     
     init(conf);
     
@@ -81,23 +83,10 @@ public class TestDFSConcurrentFileOperations extends TestCase {
     Path srcPath = new Path(src);
     Path dstPath = new Path(dst);
     FSDataOutputStream fos = fs.create(srcPath);
-    
-    fos.write(DFSTestUtil.generateSequentialBytes(0, writeSize));
+   
+    AppendTestUtil.write(fos, 0, writeSize);
     fos.sync();
     
-    LocatedBlocks blocks;
-    int i = 0;
-    do {
-      blocks = cluster
-        .getNameNode()
-        .getNamesystem()
-        .getBlockLocations(src, 0, writeSize);
-    } while (blocks.getLocatedBlocks().isEmpty() && ++i < 1000);
-    
-    assertTrue("failed to get block for file", i < 1000);
-
-    Block block = blocks.get(blocks.getLocatedBlocks().size()-1).getBlock();
-    
     // renaming a file out from under a client will cause close to fail
     // and result in the lease remaining while the blocks are finalized on
     // the DNs
@@ -110,7 +99,8 @@ public class TestDFSConcurrentFileOperations extends TestCase {
       //expected
     }
 
-    // simulate what lease recovery does--tries to update block and finalize
-    cluster.getDataNodes().get(0).updateBlock(block, block, true);
+    FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+    AppendTestUtil.recoverFile(cluster, fs2, dstPath);
+    AppendTestUtil.check(fs2, dstPath, writeSize);
   }
 }