
HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is not accurate. (Contributed by Eric Sirianni)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1547462 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal, 11 years ago
commit a1aa1836fb

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -120,3 +120,6 @@ IMPROVEMENTS:
 
     HDFS-5559. Fix TestDatanodeConfig in HDFS-2832. (Contributed by szetszwo)
 
+    HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is
+    not accurate. (Eric Sirianni via Arpit Agarwal)
+

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -454,7 +454,7 @@ class BPServiceActor implements Runnable {
       long brCreateStartTime = now();
       long totalBlockCount = 0;
 
-      Map<String, BlockListAsLongs> perVolumeBlockLists =
+      Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
           dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
 
       // Send block report
@@ -463,13 +463,11 @@ class BPServiceActor implements Runnable {
           new StorageBlockReport[perVolumeBlockLists.size()];
 
       int i = 0;
-      for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-        String storageID = kvPair.getKey();
+      for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+        DatanodeStorage dnStorage = kvPair.getKey();
         BlockListAsLongs blockList = kvPair.getValue();
         totalBlockCount += blockList.getNumberOfBlocks();
 
-        // Dummy DatanodeStorage object just for sending the block report.
-        DatanodeStorage dnStorage = new DatanodeStorage(storageID);
         reports[i++] =
             new StorageBlockReport(
               dnStorage, blockList.getBlockListAsLongs());
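
The hunk above is the heart of the fix: getBlockReports() now keys the per-volume map by DatanodeStorage, so BPServiceActor no longer fabricates a dummy DatanodeStorage from the storage ID alone. A minimal before/after sketch of how one volume's report gets labelled, assuming (as on this branch) that the one-argument DatanodeStorage constructor falls back to default state and storage type:

    // Before HDFS-5484: only the storage ID survived, so the NameNode saw
    // default state and storage type for every volume (assumption:
    // State.NORMAL / StorageType.DEFAULT are the one-arg defaults).
    // DatanodeStorage dummy = new DatanodeStorage(storageID);

    // After HDFS-5484: the map key is the volume's own DatanodeStorage,
    // built by FsVolumeImpl.toDatanodeStorage() (see below), so the report
    // carries the volume's real storage type.
    StorageBlockReport report =
        new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());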

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -268,9 +269,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   /**
    * Returns one block report per volume.
    * @param bpid Block Pool Id
-   * @return - a map of StorageID to block report for the volume.
+   * @return - a map of DatanodeStorage to block report for the volume.
    */
-  public Map<String, BlockListAsLongs> getBlockReports(String bpid);
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
 
   /**
    * Returns the cache report - the full list of cached block IDs of a

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosing
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -1089,14 +1090,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override
-  public Map<String, BlockListAsLongs> getBlockReports(String bpid) {
-    Map<String, BlockListAsLongs> blockReportMap =
-        new HashMap<String, BlockListAsLongs>();
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
+    Map<DatanodeStorage, BlockListAsLongs> blockReportMap =
+        new HashMap<DatanodeStorage, BlockListAsLongs>();
 
     for (FsVolumeImpl v : getVolumes()) {
       ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
       BlockListAsLongs blockList = getBlockReportWithReplicaMap(bpid, rMap);
-      blockReportMap.put(v.getStorageID(), blockList);
+      blockReportMap.put(v.toDatanodeStorage(), blockList);
     }
 
     return blockReportMap;
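
A minimal usage sketch of the reworked FsDatasetSpi.getBlockReports() contract, assuming the caller already holds an FsDatasetSpi instance (`dataset`) and a block pool id (`blockPoolId`); the variable names are illustrative, not from the patch:

    // One entry per volume; the key now carries storage ID, state and
    // storage type rather than a bare storage ID string.
    Map<DatanodeStorage, BlockListAsLongs> reports =
        dataset.getBlockReports(blockPoolId);
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : reports.entrySet()) {
      DatanodeStorage storage = e.getKey();
      long blocks = e.getValue().getNumberOfBlocks();
      // getStorageType()/getState() now reflect the underlying FsVolumeImpl,
      // which is what HDFS-5484 makes visible to the NameNode.
      System.out.println(storage.getStorageID() + " " + storage.getStorageType()
          + " " + storage.getState() + " blocks=" + blocks);
    }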

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -332,5 +333,10 @@ class FsVolumeImpl implements FsVolumeSpi {
   public StorageType getStorageType() {
     return storageType;
   }
+  
+  DatanodeStorage toDatanodeStorage() {
+    return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
+  }
+
 }
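
For reference, what the new toDatanodeStorage() helper would report for a hypothetical volume with storage ID "DS-1234" configured as SSD; the concrete values are illustrative (and assume the StorageType enum on this branch defines SSD), the point being that the volume's configured type, not a default, is what reaches the NameNode:

    // Equivalent of fsVolume.toDatanodeStorage() for that hypothetical volume.
    DatanodeStorage reported = new DatanodeStorage(
        "DS-1234", DatanodeStorage.State.NORMAL, StorageType.SSD);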
 

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -88,6 +88,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
@@ -1970,7 +1971,7 @@ public class MiniDFSCluster {
    * @param dataNodeIndex - data node whose block report is desired - the index is same as for getDataNodes()
    * @return the block report for the specified data node
    */
-  public Map<String, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
     if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
       throw new IndexOutOfBoundsException();
     }
@@ -1984,10 +1985,10 @@ public class MiniDFSCluster {
    * @return block reports from all data nodes
    *    BlockListAsLongs is indexed in the same order as the list of datanodes returned by getDataNodes()
    */
-  public List<Map<String, BlockListAsLongs>> getAllBlockReports(String bpid) {
+  public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
     int numDataNodes = dataNodes.size();
-    final List<Map<String, BlockListAsLongs>> result
-        = new ArrayList<Map<String, BlockListAsLongs>>(numDataNodes);
+    final List<Map<DatanodeStorage, BlockListAsLongs>> result
+        = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
     for (int i = 0; i < numDataNodes; ++i) {
       result.add(getBlockReport(bpid, i));
     }

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
@@ -1394,11 +1395,11 @@ public class TestDFSShell {
     List<File> files = new ArrayList<File>();
     List<DataNode> datanodes = cluster.getDataNodes();
     String poolId = cluster.getNamesystem().getBlockPoolId();
-    List<Map<String, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
+    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
     for(int i = 0; i < blocks.size(); i++) {
       DataNode dn = datanodes.get(i);
-      Map<String, BlockListAsLongs> map = blocks.get(i);
-      for(Map.Entry<String, BlockListAsLongs> e : map.entrySet()) {
+      Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
+      for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
         for(Block b : e.getValue()) {
           files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
         }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -136,7 +137,7 @@ public class TestInjectionForSimulatedStorage {
       DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
           filesize, blockSize, (short) numDataNodes, 0L);
       waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
-      List<Map<String, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
+      List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
       
       cluster.shutdown();
       cluster = null;
@@ -157,7 +158,7 @@ public class TestInjectionForSimulatedStorage {
                                   .build();
       cluster.waitActive();
       Set<Block> uniqueBlocks = new HashSet<Block>();
-      for(Map<String, BlockListAsLongs> map : blocksList) {
+      for(Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
         for(BlockListAsLongs blockList : map.values()) {
           for(Block b : blockList) {
             uniqueBlocks.add(new Block(b));

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -484,12 +485,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public synchronized Map<String, BlockListAsLongs> getBlockReports(
+  public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports(
       String bpid) {
-    Map<String, BlockListAsLongs> reports =
-        new HashMap<String, BlockListAsLongs>();
-    reports.put(storage.storageUuid, getBlockReport(bpid));
-    return reports;
+    return Collections.singletonMap(new DatanodeStorage(storage.storageUuid), getBlockReport(bpid));
   }
 
   @Override // FsDatasetSpi

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java

@@ -120,7 +120,7 @@ public class TestBlockReport {
   private static StorageBlockReport[] getBlockReports(
       DataNode dn, String bpid, boolean corruptOneBlockGs,
       boolean corruptOneBlockLen) {
-    Map<String, BlockListAsLongs> perVolumeBlockLists =
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
         dn.getFSDataset().getBlockReports(bpid);
 
     // Send block report
@@ -130,8 +130,8 @@ public class TestBlockReport {
     boolean corruptedLen = false;
 
     int reportIndex = 0;
-    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-      String storageID = kvPair.getKey();
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
       BlockListAsLongs blockList = kvPair.getValue();
 
       // Walk the list of blocks until we find one each to corrupt the
@@ -150,8 +150,6 @@ public class TestBlockReport {
         }
       }
 
-      // Dummy DatanodeStorage object just for sending the block report.
-      DatanodeStorage dnStorage = new DatanodeStorage(storageID);
       reports[reportIndex++] =
           new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
     }

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -154,7 +154,7 @@ public class TestDataNodeVolumeFailure {
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
     
-    Map<String, BlockListAsLongs> perVolumeBlockLists =
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
         dn.getFSDataset().getBlockReports(bpid);
 
     // Send block report
@@ -162,10 +162,9 @@ public class TestDataNodeVolumeFailure {
         new StorageBlockReport[perVolumeBlockLists.size()];
 
     int reportIndex = 0;
-    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-        String storageID = kvPair.getKey();
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+        DatanodeStorage dnStorage = kvPair.getKey();
         BlockListAsLongs blockList = kvPair.getValue();
-        DatanodeStorage dnStorage = new DatanodeStorage(storageID);
         reports[reportIndex++] =
             new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
     }