
HDFS-1657. Tests that corrupt block files fail due to changed file path in federation. Contributed by Suresh Srinivas.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1074766 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 14 years ago
parent
commit
e0f4177c09

+ 3 - 0
CHANGES.txt

@@ -52,6 +52,9 @@ Trunk (unreleased changes)
     HDFS-1643. HDFS Federation: remove namenode argument from DataNode 
     constructor (boryas)
 
+    HDFS-1657. Tests that corrupt block files fail due to changed file 
+    path in federation. (suresh)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

+ 110 - 28
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -579,8 +579,8 @@ public class MiniDFSCluster {
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       if (manageDfsDirs) {
-        File dir1 = new File(data_dir, "data"+(2*i+1));
-        File dir2 = new File(data_dir, "data"+(2*i+2));
+        File dir1 = getStorageDir(i, 0);
+        File dir2 = getStorageDir(i, 1);
         dir1.mkdirs();
         dir2.mkdirs();
         if (!dir1.isDirectory() || !dir2.isDirectory()) { 
@@ -821,40 +821,40 @@ public class MiniDFSCluster {
    * Corrupt a block on all datanode
    */
   void corruptBlockOnDataNodes(ExtendedBlock block) throws Exception{
-    for (int i=0; i < dataNodes.size(); i++)
-      corruptBlockOnDataNode(i, block);
+    File[] blockFiles = getAllBlockFiles(block);
+    for (File f : blockFiles) {
+      corruptBlock(f);
+    }
   }
 
   /*
    * Corrupt a block on a particular datanode
    * Types: delete, write bad data, truncate
    */
-  boolean corruptBlockOnDataNode(int i, ExtendedBlock blk) throws Exception {
-    Random random = new Random();
-    boolean corrupted = false;
-    File dataDir = new File(getBaseDirectory() + "data");
-    if (i < 0 || i >= dataNodes.size())
+  public static boolean corruptBlockOnDataNode(int i, ExtendedBlock blk)
+      throws IOException {
+    File blockFile = getBlockFile(i, blk);
+    return corruptBlock(blockFile);
+  }
+
+  /*
+   * Corrupt a block on a particular datanode
+   */
+  public static boolean corruptBlock(File blockFile) throws IOException {
+    if (blockFile == null || !blockFile.exists()) {
       return false;
-    
-    // TODO:FEDERATION use blockPoolId
-    String blockName = blk.getBlockName();
-    for (int dn = i*2; dn < i*2+2; dn++) {
-      File blockFile = new File(dataDir, "data" + (dn+1) + FINALIZED_DIR_NAME +
-                                blockName);
-      System.out.println("Corrupting for: " + blockFile);
-      if (blockFile.exists()) {
-        // Corrupt replica by writing random bytes into replica
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int)channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-      }
-      corrupted = true;
     }
-    return corrupted;
+    // Corrupt replica by writing random bytes into replica
+    Random random = new Random();
+    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+    FileChannel channel = raFile.getChannel();
+    String badString = "BADBAD";
+    int rand = random.nextInt((int)channel.size()/2);
+    raFile.seek(rand);
+    raFile.write(badString.getBytes());
+    raFile.close();
+    LOG.warn("Corrupting the block " + blockFile);
+    return true;
   }
 
   /*
@@ -1191,4 +1191,86 @@ public class MiniDFSCluster {
   public static String getBaseDirectory() {
     return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
   }
+
+  /**
+   * Get a storage directory for a datanode. There are two storage directories
+   * per datanode:
+   * <ol>
+   * <li><base directory>/data/data<2*dnIndex + 1></li>
+   * <li><base directory>/data/data<2*dnIndex + 2></li>
+   * </ol>
+   * 
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index (0 or 1). Index 0 provides access to the
+   *          first storage directory. Index 1 provides access to the second
+   *          storage directory.
+   * @return Storage directory
+   */
+  public static File getStorageDir(int dnIndex, int dirIndex) {
+    return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
+  }
+  
+  /**
+   * Get rbw directory for a block pool
+   * @param storageDir storage directory
+   * @param bpid Block pool Id
+   * @return rbw directory for a block pool
+   */
+  public static File getRbwDir(File storageDir, String bpid) {
+    return new File(storageDir, "/current/" + bpid + "/rbw/");
+  }
+  
+  /**
+   * Get finalized directory for a block pool
+   * @param storageDir storage directory
+   * @param bpid Block pool Id
+   * @return finalized directory for a block pool
+   */
+  public static File getFinalizedDir(File storageDir, String bpid) {
+    return new File(storageDir, "/current/" + bpid + "/finalized/");
+  }
+  
+  /**
+   * Get file corresponding to a block
+   * @param storageDir storage directory
+   * @param blk block to be corrupted
+   * @return file corresponding to the block
+   */
+  public static File getBlockFile(File storageDir, ExtendedBlock blk) {
+    return new File(getFinalizedDir(storageDir, blk.getPoolId()), blk
+        .getBlockName());
+  }
+  
+  /**
+   * Get all files related to a block from all the datanodes
+   * @param block block for which corresponding files are needed
+   */
+  public File[] getAllBlockFiles(ExtendedBlock block) {
+    if (dataNodes.size() == 0) return new File[0];
+    ArrayList<File> list = new ArrayList<File>();
+    for (int i=0; i < dataNodes.size(); i++) {
+      File blockFile = getBlockFile(i, block);
+      if (blockFile != null) {
+        list.add(blockFile);
+      }
+    }
+    return list.toArray(new File[list.size()]);
+  }
+  
+  /**
+   * Get files related to a block for a given datanode
+   * @param dnIndex Index of the datanode to get block files for
+   * @param block block for which corresponding files are needed
+   */
+  public static File getBlockFile(int dnIndex, ExtendedBlock block) {
+    // Check for block file in the two storage directories of the datanode
+    for (int i = 0; i <=1 ; i++) {
+      File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i);
+      File blockFile = getBlockFile(storageDir, block);
+      if (blockFile.exists()) {
+        return blockFile;
+      }
+    }
+    return null;
+  }
 }
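
For orientation, here is a minimal sketch (not part of the patch) of how a test can exercise the helpers added above. The MiniDFSCluster methods, DFSTestUtil.getFirstBlock, and getNamesystem().getPoolId() are the ones shown in this commit; the test method name and the "/testFile" path are illustrative, and imports for File, FileSystem, Path, DFSTestUtil, ExtendedBlock, and MiniDFSCluster are omitted.

    // Sketch only: "cluster" is a running MiniDFSCluster and "/testFile" has
    // already been written and replicated by the test.
    void corruptFirstBlockOfTestFile(MiniDFSCluster cluster) throws Exception {
      FileSystem fs = cluster.getFileSystem();
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, new Path("/testFile"));

      // Resolve the replica on datanode 0 through the federated layout
      // (<storage dir>/current/<block pool id>/finalized/<block name>)
      // instead of a hard-coded "data1/current/finalized" path.
      File blockFile = MiniDFSCluster.getBlockFile(0, blk);
      if (blockFile != null) {
        MiniDFSCluster.corruptBlock(blockFile);   // overwrites a few bytes in place
      }

      // Or corrupt every replica on every datanode in one step.
      for (File f : cluster.getAllBlockFiles(blk)) {
        MiniDFSCluster.corruptBlock(f);
      }

      // Directory-level access for tests that list or delete block files.
      String bpid = cluster.getNamesystem().getPoolId();
      File storageDir = MiniDFSCluster.getStorageDir(0, 0);   // first volume of datanode 0
      File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      // finalizedDir now points at <storage dir>/current/<bpid>/finalized/
    }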

+ 6 - 37
src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java

@@ -116,46 +116,15 @@ public class TestBlockMissingException extends TestCase {
     assertTrue("Expected BlockMissingException ", gotException);
   }
 
-  /*
-   * The Data directories for a datanode
-   */
-  private File[] getDataNodeDirs(int i) throws IOException {
-    String base_dir = MiniDFSCluster.getBaseDirectory();
-    File data_dir = new File(base_dir, "data");
-    File dir1 = new File(data_dir, "data"+(2*i+1));
-    File dir2 = new File(data_dir, "data"+(2*i+2));
-    if (dir1.isDirectory() && dir2.isDirectory()) {
-      File[] dir = new File[2];
-      dir[0] = new File(dir1, MiniDFSCluster.FINALIZED_DIR_NAME);
-      dir[1] = new File(dir2, MiniDFSCluster.FINALIZED_DIR_NAME); 
-      return dir;
-    }
-    return new File[0];
-  }
-
   //
   // Corrupt specified block of file
   //
-  void corruptBlock(Path file, ExtendedBlock blockNum) throws IOException {
-    long id = blockNum.getBlockId();
-
-    // Now deliberately remove/truncate data blocks from the block.
-    //
-    for (int i = 0; i < NUM_DATANODES; i++) {
-      File[] dirs = getDataNodeDirs(i);
-      
-      for (int j = 0; j < dirs.length; j++) {
-        File[] blocks = dirs[j].listFiles();
-        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length >= 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (blocks[idx].getName().startsWith("blk_" + id) &&
-              !blocks[idx].getName().endsWith(".meta")) {
-            blocks[idx].delete();
-            LOG.info("Deleted block " + blocks[idx]);
-          }
-        }
-      }
+  void corruptBlock(Path file, ExtendedBlock blk) {
+    // Now deliberately remove/truncate data blocks from the file.
+    File[] blockFiles = dfs.getAllBlockFiles(blk);
+    for (File f : blockFiles) {
+      f.delete();
+      LOG.info("Deleted block " + f);
     }
   }
-
 }

+ 6 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -83,8 +83,9 @@ public class TestCrcCorruption {
       // file disallows this Datanode to send data to another datanode.
       // However, a client is alowed access to this block.
       //
-      File data_dir = new File(System.getProperty("test.build.data"),
-                               "dfs/data/data1" + MiniDFSCluster.FINALIZED_DIR_NAME);
+      File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      String bpid = cluster.getNamesystem().getPoolId();
+      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
@@ -136,12 +137,13 @@ public class TestCrcCorruption {
           }
         }
       }
+      
       //
       // Now deliberately corrupt all meta blocks from the second
       // directory of the first datanode
       //
-      data_dir = new File(System.getProperty("test.build.data"),
-                               "dfs/data/data2" + MiniDFSCluster.FINALIZED_DIR_NAME);
+      storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));

+ 10 - 39
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -143,27 +143,7 @@ public class TestDatanodeBlockScanner extends TestCase {
   }
 
   public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
-    String blockName = blk.getLocalBlock().getBlockName();
-    Random random = new Random();
-    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
-    boolean corrupted = false;
-    // TODO:FEDERATION use BlockPoolId
-    for (int i=replica*2; i<replica*2+2; i++) {
-      File blockFile = new File(baseDir, "data" + (i+1) + 
-          MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
-      if (blockFile.exists()) {
-        // Corrupt replica by writing random bytes into replica
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int)channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
-        corrupted = true;
-      }
-    }
-    return corrupted;
+    return MiniDFSCluster.corruptBlockOnDataNode(replica, blk);
   }
 
   public void testBlockCorruptionPolicy() throws IOException {
@@ -427,31 +407,22 @@ public class TestDatanodeBlockScanner extends TestCase {
    */
   static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex,
       int lenDelta) throws IOException {
-    String blockName = blk.getBlockName();
-    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
-    for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
-      File blockFile = new File(baseDir, "data" + (i+1) + 
-          MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
-      if (blockFile.exists()) {
-        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        raFile.setLength(raFile.length()+lenDelta);
-        raFile.close();
-        return true;
-      }
+    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
+    if (blockFile.exists()) {
+      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+      raFile.setLength(raFile.length()+lenDelta);
+      raFile.close();
+      return true;
     }
     return false;
   }
   
   private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex) 
   throws IOException, InterruptedException {
-    String blockName = blk.getBlockName();
-    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
-    File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) + 
-        MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
-    File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2) + 
-        MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
-    while (blockFile1.exists() || blockFile2.exists()) {
+    File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
+    while (blockFile != null) {
       Thread.sleep(100);
+      blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
     }
   }
 }

+ 12 - 10
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -63,8 +63,9 @@ public class TestFileCorruption extends TestCase {
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
-      File data_dir = new File(System.getProperty("test.build.data"),
-                               "dfs/data/data5/current");
+      File storageDir = MiniDFSCluster.getStorageDir(2, 0);
+      String bpid = cluster.getNamesystem().getPoolId();
+      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
@@ -123,12 +124,14 @@ public class TestFileCorruption extends TestCase {
       DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
       
       // get the block
-      File dataDir = new File(cluster.getDataDirectory(),
-          "data1" + MiniDFSCluster.FINALIZED_DIR_NAME);
-      ExtendedBlock blk = getBlock(dataDir);
+      final String bpid = cluster.getNamesystem().getPoolId();
+      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+      ExtendedBlock blk = getBlock(bpid, dataDir);
       if (blk == null) {
-        blk = getBlock(new File(cluster.getDataDirectory(),
-          "dfs/data/data2" + MiniDFSCluster.FINALIZED_DIR_NAME));
+        storageDir = MiniDFSCluster.getStorageDir(0, 1);
+        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        blk = getBlock(bpid, dataDir);
       }
       assertFalse(blk==null);
 
@@ -154,7 +157,7 @@ public class TestFileCorruption extends TestCase {
     
   }
   
-  private ExtendedBlock getBlock(File dataDir) {
+  private ExtendedBlock getBlock(String bpid, File dataDir) {
     assertTrue("data directory does not exist", dataDir.exists());
     File[] blocks = dataDir.listFiles();
     assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
@@ -181,7 +184,6 @@ public class TestFileCorruption extends TestCase {
         break;
       }
     }
-    // TODO:FEDERATION cleanup when BlockPoolID support in Datanode is complete
-    return new ExtendedBlock("TODO", blockId, blocks[idx].length(), blockTimeStamp);
+    return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp);
   }
 }

+ 3 - 9
src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java

@@ -326,25 +326,19 @@ public class TestReplication extends TestCase {
       waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
 
       // get first block of the file.
-      String block = dfsClient.getNamenode().
-                       getBlockLocations(testFile, 0, Long.MAX_VALUE).
-                       get(0).getBlock().getBlockName();
+      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile,
+          0, Long.MAX_VALUE).get(0).getBlock();
       
       cluster.shutdown();
       cluster = null;
       
-      //Now mess up some of the replicas.
-      //Delete the first and corrupt the next two.
-      File baseDir = new File(System.getProperty("test.build.data"), 
-                                                 "dfs/data");
       for (int i=0; i<25; i++) {
         buffer[i] = '0';
       }
       
       int fileCount = 0;
       for (int i=0; i<6; i++) {
-        File blockFile = new File(baseDir, "data" + (i+1) + 
-            MiniDFSCluster.FINALIZED_DIR_NAME + block);
+        File blockFile = MiniDFSCluster.getBlockFile(i, block);
         LOG.info("Checking for file " + blockFile);
         
         if (blockFile.exists()) {

+ 6 - 3
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -101,7 +101,8 @@ public class TestDataNodeVolumeFailure extends TestCase{
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
     data_fail = new File(dataDir, "data3");
-    failedDir = new File(data_fail, MiniDFSCluster.FINALIZED_DIR_NAME);
+    failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
+        cluster.getNamesystem().getPoolId());
     if (failedDir.exists() &&
         //!FileUtil.fullyDelete(failedDir)
         !deteteBlocks(failedDir)
@@ -303,9 +304,11 @@ public class TestDataNodeVolumeFailure extends TestCase{
 
   private int countRealBlocks(Map<String, BlockLocs> map) {
     int total = 0;
+    final String bpid = cluster.getNamesystem().getPoolId();
     for(int i=0; i<dn_num; i++) {
-      for(int j=1; j<=2; j++) {
-        File dir = new File(dataDir, "data"+(2*i+j)+MiniDFSCluster.FINALIZED_DIR_NAME);
+      for(int j=0; j<=1; j++) {
+        File storageDir = MiniDFSCluster.getStorageDir(i, j);
+        File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         if(dir == null) {
           System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
           continue;

+ 10 - 6
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -60,9 +60,11 @@ public class TestDiskError extends TestCase {
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     final int dnIndex = 0;
-    String dataDir = cluster.getDataDirectory();
-    File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "current/rbw");
-    File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "current/rbw");
+    String bpid = cluster.getNamesystem().getPoolId();
+    File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
+    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
+    storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
+    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     try {
       // make the data directory of the first datanode to be readonly
       assertTrue(dir1.setReadOnly());
@@ -132,9 +134,11 @@ public class TestDiskError extends TestCase {
       out.close();
       
       // the temporary block & meta files should be deleted
-      String dataDir = cluster.getDataDirectory();
-      File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "current/rbw");
-      File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "current/rbw");
+      String bpid = cluster.getNamesystem().getPoolId();
+      File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
+      File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
+      storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);
+      File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
       while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
         Thread.sleep(100);
       }
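
The replica-being-written (rbw) directories follow the same per-block-pool layout. A minimal sketch, not from the patch, of the wait loop used in TestDiskError above; the helper name is illustrative and the rbw directories are assumed to exist because the datanode was started with two volumes.

    // Sketch only: wait until both rbw directories of a given datanode are empty.
    void waitForEmptyRbwDirs(MiniDFSCluster cluster, int dnIndex)
        throws InterruptedException {
      String bpid = cluster.getNamesystem().getPoolId();
      File rbw1 = MiniDFSCluster.getRbwDir(MiniDFSCluster.getStorageDir(dnIndex, 0), bpid);
      File rbw2 = MiniDFSCluster.getRbwDir(MiniDFSCluster.getStorageDir(dnIndex, 1), bpid);
      while (rbw1.listFiles().length != 0 || rbw2.listFiles().length != 0) {
        Thread.sleep(100);
      }
    }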

+ 21 - 25
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.io.IOUtils;
@@ -246,14 +247,10 @@ public class TestFsck extends TestCase {
       String[] fileNames = util.getFileNames(topDir);
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                           cluster.getNameNodePort()), conf);
-      String block = dfsClient.getNamenode().
-                      getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
-                      get(0).getBlock().getBlockName();
-      File baseDir = new File(System.getProperty("test.build.data",
-                                                 "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File blockFile = new File(baseDir, "data" +(i+1) + 
-            MiniDFSCluster.FINALIZED_DIR_NAME + block);
+      ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
+          fileNames[0], 0, Long.MAX_VALUE).get(0).getBlock();
+      for (int i=0; i<4; i++) {
+        File blockFile = MiniDFSCluster.getBlockFile(i, block);
         if(blockFile.exists()) {
           assertTrue(blockFile.delete());
         }
@@ -354,7 +351,7 @@ public class TestFsck extends TestCase {
     DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
     // Wait until file replication has completed
     DFSTestUtil.waitReplication(fs, file1, (short)3);
-    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
 
     // Make sure filesystem is in healthy state
     outStr = runFsck(conf, 0, true, "/");
@@ -362,11 +359,8 @@ public class TestFsck extends TestCase {
     assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
     // corrupt replicas 
-    File baseDir = new File(System.getProperty("test.build.data",
-                                               "build/test/data"),"dfs/data");
-    for (int i=0; i < 6; i++) {
-      File blockFile = new File(baseDir, "data" + (i+1) + 
-          MiniDFSCluster.FINALIZED_DIR_NAME + block);
+    for (int i=0; i < 3; i++) {
+      File blockFile = MiniDFSCluster.getBlockFile(i, block);
       if (blockFile.exists()) {
         RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
         FileChannel channel = raFile.getChannel();
@@ -469,19 +463,21 @@ public class TestFsck extends TestCase {
       System.out.println("1. good fsck out: " + outStr);
       assertTrue(outStr.contains("has 0 CORRUPT files"));
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-      "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      final String bpid = cluster.getNamesystem().getPoolId();
+      for (int i=0; i<4; i++) {
+        for (int j=0; j<=1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null)
             continue;
+  
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            assertTrue("Cannot remove file.", blocks[idx].delete());
           }
-          assertTrue("Cannot remove file.", blocks[idx].delete());
         }
       }
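
The same nested loop over datanodes and their two storage directories appears in TestFsck above and in TestListCorruptFileBlocks below. A hedged sketch of how it could be factored into a single test utility; the helper name is illustrative, while the MiniDFSCluster calls are the ones added by this patch.

    // Sketch only: delete every finalized block file on the first numDataNodes datanodes.
    static void deleteAllBlockFiles(MiniDFSCluster cluster, int numDataNodes) {
      String bpid = cluster.getNamesystem().getPoolId();
      for (int i = 0; i < numDataNodes; i++) {
        for (int j = 0; j <= 1; j++) {
          File storageDir = MiniDFSCluster.getStorageDir(i, j);
          File[] blocks = MiniDFSCluster.getFinalizedDir(storageDir, bpid).listFiles();
          if (blocks == null) {
            continue;       // this volume holds no finalized replicas yet
          }
          for (File blk : blocks) {
            if (blk.getName().startsWith("blk_")) {
              blk.delete(); // the tests assert on the return value; omitted here
            }
          }
        }
      }
    }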
 

+ 30 - 26
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java

@@ -138,23 +138,24 @@ public class TestListCorruptFileBlocks extends TestCase {
       int numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-          "build/test/data"), "dfs/data");
-      for (int i = 0; i < 8; i++) {
-        File data_dir = new File(baseDir, "data" + (i + 1)
-            + MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
-        // (blocks.length > 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      String bpid = cluster.getNamesystem().getPoolId();
+      for (int i = 0; i < 4; i++) {
+        for (int j = 0; j <= 1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null)
             continue;
+          // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
+          // (blocks.length > 0));
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            LOG.info("Deliberately removing file " + blocks[idx].getName());
+            assertTrue("Cannot remove file.", blocks[idx].delete());
+            // break;
           }
-          LOG.info("Deliberately removing file " + blocks[idx].getName());
-          assertTrue("Cannot remove file.", blocks[idx].delete());
-          // break;
         }
       }
 
@@ -232,19 +233,22 @@ public class TestListCorruptFileBlocks extends TestCase {
           badFiles.size() == 0);
 
       // Now deliberately blocks from all files
-      File baseDir = new File(System.getProperty("test.build.data",
-      "build/test/data"),"dfs/data");
-      for (int i=0; i<8; i++) {
-        File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
-        File[] blocks = data_dir.listFiles();
-        if (blocks == null)
-          continue;
-
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
+      final String bpid = cluster.getNamesystem().getPoolId();
+      for (int i=0; i<4; i++) {
+        for (int j=0; j<=1; j++) {
+          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+          LOG.info("Removing files from " + data_dir);
+          File[] blocks = data_dir.listFiles();
+          if (blocks == null)
             continue;
+  
+          for (int idx = 0; idx < blocks.length; idx++) {
+            if (!blocks[idx].getName().startsWith("blk_")) {
+              continue;
+            }
+            assertTrue("Cannot remove file.", blocks[idx].delete());
           }
-          assertTrue("Cannot remove file.", blocks[idx].delete());
         }
       }
 

+ 4 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java

@@ -57,8 +57,11 @@ public class TestOverReplicatedBlocks extends TestCase {
       TestDatanodeBlockScanner.corruptReplica(block, 0);
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
+      // TODO:FEDERATION needs change when data block scanner is changed
+      // TODO:FEDERATION remove finalized_dir_name and use methods in MiniDFSCluster
+      final String finalized_dir_name = "/current/finalized/";
       File scanLog = new File(System.getProperty("test.build.data"),
-          "dfs/data/data1" + MiniDFSCluster.FINALIZED_DIR_NAME + 
+          "dfs/data/data1" + finalized_dir_name + 
           "dncp_block_verification.log.curr");
       //wait for one minute for deletion to succeed;
       for(int i=0; !scanLog.delete(); i++) {

+ 1 - 1
src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -71,7 +71,7 @@ public class TestBlockRecovery {
   private Configuration conf;
   private final static long RECOVERY_ID = 3000L;
   // TODO:FEDERATION fix pool ID
-  private final static String POOL_ID = "TODO";
+  private final static String POOL_ID = "BP-TEST";
   private final static long BLOCK_ID = 1000L;
   private final static long GEN_STAMP = 2000L;
   private final static long BLOCK_LEN = 3000L;