
Fix trunk-to-federation-branch integration issues.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1078974 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 14 years ago
parent commit a6a5bc2957

+ 5 - 6
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java

@@ -248,17 +248,16 @@ public class TestListCorruptFileBlocks {
       DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
       util.createFiles(fs, "/corruptData");
 
-      final NameNode namenode = cluster.getNameNode();
       RemoteIterator<Path> corruptFileBlocks = 
         dfs.listCorruptFileBlocks(new Path("/corruptData"));
       int numCorrupt = countPaths(corruptFileBlocks);
       assertTrue(numCorrupt == 0);
       // delete the blocks
-      File baseDir = new File(System.getProperty("test.build.data",
-          "build/test/data"), "dfs/data");
-      for (int i = 0; i < 8; i++) {
-        File data_dir = new File(baseDir, "data" + (i + 1)
-            + MiniDFSCluster.FINALIZED_DIR_NAME);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      // Loop through the data directories per datanode (2)
+      for (int i = 0; i < 2; i++) {
+        File storageDir = MiniDFSCluster.getStorageDir(0, i);
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         File[] blocks = data_dir.listFiles();
         if (blocks == null)
           continue;
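
The hunk above reflects HDFS federation's on-disk layout: block files now live under a per-block-pool finalized directory inside each datanode storage directory, so the old hard-coded "dfs/data/dataN" paths no longer resolve. A minimal sketch of the lookup pattern, using the same MiniDFSCluster helpers the hunk relies on (the listBlockFiles wrapper itself is hypothetical):

import java.io.File;

import org.apache.hadoop.hdfs.MiniDFSCluster;

public class FinalizedDirExample {
  /** Prints the block files of datanode dnIndex across its storage dirs. */
  static void listBlockFiles(MiniDFSCluster cluster, int dnIndex) {
    // Under federation the block pool id scopes the on-disk layout.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // MiniDFSCluster configures two data directories per datanode by default.
    for (int i = 0; i < 2; i++) {
      File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i);
      File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      File[] blocks = finalizedDir.listFiles();
      if (blocks == null)
        continue; // this storage dir holds no finalized blocks yet
      for (File block : blocks) {
        System.out.println(block.getAbsolutePath());
      }
    }
  }
}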

+ 6 - 7
src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -153,12 +152,12 @@ public class TestBlockRecovery {
     syncList.add(record1);
     syncList.add(record2);
     
-    when(dn1.updateReplicaUnderRecovery((Block)anyObject(), anyLong(), 
-        anyLong())).thenReturn(new Block(block.getBlockId(), 
-            expectLen, block.getGenerationStamp()));
-    when(dn2.updateReplicaUnderRecovery((Block)anyObject(), anyLong(), 
-        anyLong())).thenReturn(new Block(block.getBlockId(), 
-            expectLen, block.getGenerationStamp()));
+    when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
+        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
+            block.getBlockId(), expectLen, block.getGenerationStamp()));
+    when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
+        anyLong())).thenReturn(new ExtendedBlock(block.getBlockPoolId(), 
+            block.getBlockId(), expectLen, block.getGenerationStamp()));
     dn.syncBlock(rBlock, syncList);
   }
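
The second hunk tracks the matching protocol change: under federation, updateReplicaUnderRecovery takes and returns an ExtendedBlock, which pairs a block pool id with the block id, length, and generation stamp, rather than a bare Block. A minimal sketch of the updated Mockito stubbing pattern, assuming a mocked InterDatanodeProtocol as in the test (the stubbedDatanode helper is hypothetical):

import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

public class RecoveryStubExample {
  /** Returns a mock datanode whose recovery reply reports expectLen bytes. */
  static InterDatanodeProtocol stubbedDatanode(ExtendedBlock block,
      long expectLen) throws Exception {
    InterDatanodeProtocol dn = mock(InterDatanodeProtocol.class);
    // The returned ExtendedBlock carries the block pool id alongside the
    // block id, the synced length, and the generation stamp.
    when(dn.updateReplicaUnderRecovery((ExtendedBlock) anyObject(),
        anyLong(), anyLong()))
        .thenReturn(new ExtendedBlock(block.getBlockPoolId(),
            block.getBlockId(), expectLen, block.getGenerationStamp()));
    return dn;
  }
}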