
HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed by Junping Du)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1540228 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal · 11 years ago
parent · commit 90d1b1b6a4

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -85,6 +85,9 @@ IMPROVEMENTS:
     HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
     TestNNThroughputBenchmark (Contributed by szetszwo)
 
-    HDFS-5475.  NN incorrectly tracks more than one replica per DN. (Arpit
+    HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
     Agarwal)
 
+    HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed
+    by Junping Du)
+

+ 20 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -42,11 +42,13 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -151,13 +153,24 @@ public class TestDataNodeVolumeFailure {
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
-    final StorageBlockReport[] report = {
-        new StorageBlockReport(
-            new DatanodeStorage(dnR.getDatanodeUuid()),
-            DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
-                ).getBlockListAsLongs())
-    };
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
+    
+    Map<String, BlockListAsLongs> perVolumeBlockLists =
+        dn.getFSDataset().getBlockReports(bpid);
+
+    // Send block report
+    StorageBlockReport[] reports =
+        new StorageBlockReport[perVolumeBlockLists.size()];
+
+    int reportIndex = 0;
+    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+        String storageID = kvPair.getKey();
+        BlockListAsLongs blockList = kvPair.getValue();
+        DatanodeStorage dnStorage = new DatanodeStorage(storageID);
+        reports[reportIndex++] =
+            new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
+    }
+    
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);
 
     // verify number of blocks and files...
     verify(filename, filesize);
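
For context, the fix replaces the test's single aggregate block report with one StorageBlockReport per storage, matching the per-storage reporting model on branch HDFS-2832. Below is a minimal sketch of that conversion pulled into a standalone helper; the class and method names are illustrative only and are not part of this commit.

import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

// Illustrative helper (not part of the patch): builds one report per storage
// from the per-volume map returned by the dataset, keyed by storage ID.
class PerStorageBlockReports {
  static StorageBlockReport[] toReports(
      Map<String, BlockListAsLongs> perVolumeBlockLists) {
    StorageBlockReport[] reports =
        new StorageBlockReport[perVolumeBlockLists.size()];
    int reportIndex = 0;
    for (Map.Entry<String, BlockListAsLongs> entry : perVolumeBlockLists.entrySet()) {
      // Each storage gets its own DatanodeStorage identity and block list.
      reports[reportIndex++] = new StorageBlockReport(
          new DatanodeStorage(entry.getKey()),
          entry.getValue().getBlockListAsLongs());
    }
    return reports;
  }
}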