
svn merge -c 1303318 from trunk for HDFS-3086.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1303320 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
commit 159307ce4f
15 changed files with 83 additions and 75 deletions
  1. +7 -4   hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  2. +3 -11  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  3. +3 -7   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
  4. +3 -4   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  5. +2 -5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  6. +2 -4   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
  7. +11 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
  8. +5 -5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageBlockReport.java
  9. +1 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
  10. +9 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
  11. +1 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
  12. +17 -8 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
  13. +3 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  14. +13 -9 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  15. +3 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

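In short, this change stops the Datanode from sending its storage list at registration time: the DatanodeStorage[] parameter is dropped from DatanodeProtocol#registerDatanode, the repeated storages field is removed from RegisterDatanodeRequestProto, and each StorageBlockReport now carries a full DatanodeStorage (storage ID plus state) instead of a bare storage ID, so the NameNode learns about a datanode's storages from its block reports instead.
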
+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -148,18 +148,21 @@ Release 0.23.3 - UNRELEASED
     HDFS-3091. Update the usage limitations of ReplaceDatanodeOnFailure policy in
     the config description for the smaller clusters. (szetszwo via umamahesh)
 
-    HDFS-3105.  Add DatanodeStorage information to block recovery.  (szetszwo)
+    HDFS-3105. Add DatanodeStorage information to block recovery.  (szetszwo)
+
+    HDFS-3086. Change Datanode not to send storage list in registration.
+    (szetszwo)
 
   OPTIMIZATIONS
+
     HDFS-2477. Optimize computing the diff between a block report and the
-               namenode state. (Tomasz Nykiel via hairong)
+    namenode state. (Tomasz Nykiel via hairong)
 
     HDFS-2495. Increase granularity of write operations in ReplicationMonitor
     thus reducing contention for write lock. (Tomasz Nykiel via hairong)
 
     HDFS-2476. More CPU efficient data structure for under-replicated,
-               over-replicated, and invalidated blocks.
-               (Tomasz Nykiel via todd)
+    over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
 
     HDFS-3036. Remove unused method DFSUtil#isDefaultNamenodeAddress. (atm)
 

+ 3 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeComm
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
@@ -50,12 +49,10 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
@@ -69,7 +66,6 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
-import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
@@ -145,14 +141,10 @@ public class DatanodeProtocolClientSideTranslatorPB implements
   }
 
   @Override
-  public DatanodeRegistration registerDatanode(DatanodeRegistration registration,
-      DatanodeStorage[] storages) throws IOException {
+  public DatanodeRegistration registerDatanode(DatanodeRegistration registration
+      ) throws IOException {
     RegisterDatanodeRequestProto.Builder builder = RegisterDatanodeRequestProto
         .newBuilder().setRegistration(PBHelper.convert(registration));
-    for (DatanodeStorage s : storages) {
-      builder.addStorages(PBHelper.convert(s));
-    }
-    
     RegisterDatanodeResponseProto resp;
     try {
       resp = rpcProxy.registerDatanode(NULL_CONTROLLER, builder.build());
@@ -198,7 +190,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     
     for (StorageBlockReport r : reports) {
       StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
-          .newBuilder().setStorageID(r.getStorageID());
+          .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
       long[] blocks = r.getBlocks();
       for (int i = 0; i < blocks.length; i++) {
         reportBuilder.addBlocks(blocks[i]);

+ 3 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java

@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
@@ -88,12 +87,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     DatanodeRegistration registration = PBHelper.convert(request
         .getRegistration());
     DatanodeRegistration registrationResp;
-    DatanodeStorage[] storages = new DatanodeStorage[request.getStoragesCount()];
-    for (int i = 0; i < request.getStoragesCount(); i++) {
-      storages[i] = PBHelper.convert(request.getStorages(i));
-    }
     try {
-      registrationResp = impl.registerDatanode(registration, storages);
+      registrationResp = impl.registerDatanode(registration);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -148,7 +143,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
       for (int i = 0; i < blockIds.size(); i++) {
         blocks[i] = blockIds.get(i);
       }
-      report[index++] = new StorageBlockReport(s.getStorageID(), blocks);
+      report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
+          blocks);
     }
     try {
       cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -24,7 +24,6 @@ import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -384,7 +383,8 @@ class BPServiceActor implements Runnable {
       // Send block report
       long brSendStartTime = now();
       StorageBlockReport[] report = { new StorageBlockReport(
-          bpRegistration.getStorageID(), bReport.getBlockListAsLongs()) };
+          new DatanodeStorage(bpRegistration.getStorageID()),
+          bReport.getBlockListAsLongs()) };
       cmd = bpNamenode.blockReport(bpRegistration, bpos.getBlockPoolId(), report);
 
       // Log the block report processing stats from Datanode perspective
@@ -603,8 +603,7 @@ class BPServiceActor implements Runnable {
     while (shouldRun()) {
       try {
         // Use returned registration from namenode with updated machine name.
-        bpRegistration = bpNamenode.registerDatanode(bpRegistration,
-            new DatanodeStorage[0]);
+        bpRegistration = bpNamenode.registerDatanode(bpRegistration);
         break;
       } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + nnAddr);

+ 2 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -46,7 +46,6 @@ import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
 import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
-
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -96,7 +95,6 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -831,11 +829,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
 
 
   @Override // DatanodeProtocol
-  public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg,
-      DatanodeStorage[] storages) throws IOException {
+  public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg
+      ) throws IOException {
     verifyVersion(nodeReg.getVersion());
     namesystem.registerDatanode(nodeReg);
-      
     return nodeReg;
   }
 

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java

@@ -80,14 +80,12 @@ public interface DatanodeProtocol {
    *
    * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
    * @param registration datanode registration information
-   * @param storages list of storages on the datanode
    * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains 
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
-  public DatanodeRegistration registerDatanode(
-      DatanodeRegistration registration, DatanodeStorage[] storages)
-      throws IOException;
+  public DatanodeRegistration registerDatanode(DatanodeRegistration registration
+      ) throws IOException;
   
   /**
    * sendHeartbeat() tells the NameNode that the DataNode is still

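For illustration, a minimal sketch of the caller-side migration for this signature change; the names bpNamenode and bpRegistration match the BPServiceActor diff above, and everything around them is elided:

    // Before HDFS-3086: the storage list was passed at registration.
    //   bpRegistration = bpNamenode.registerDatanode(bpRegistration, new DatanodeStorage[0]);

    // After HDFS-3086: registration takes only the DatanodeRegistration; the
    // NameNode returns it updated (e.g. with a new storageID if the datanode
    // did not have one, plus the registration ID for further communication).
    bpRegistration = bpNamenode.registerDatanode(bpRegistration);
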
+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java

@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.protocol;
 
 /**
- * Class capatures information about a storage in Datanode
+ * Class captures information of a storage in Datanode.
  */
 public class DatanodeStorage {
+  /** The state of the storage. */
   public enum State {
     NORMAL,
     READ_ONLY
@@ -28,7 +29,15 @@ public class DatanodeStorage {
   
   private final String storageID;
   private final State state;
-  
+
+  /**
+   * Create a storage with {@link State#NORMAL}.
+   * @param storageID
+   */
+  public DatanodeStorage(String storageID) {
+    this(storageID, State.NORMAL);
+  }
+
   public DatanodeStorage(String sid, State s) {
     storageID = sid;
     state = s;

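The new single-argument constructor defaults the storage state, so the two calls in this sketch are equivalent (the storage ID here is hypothetical):

    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

    DatanodeStorage s1 = new DatanodeStorage("DS-example");  // state defaults to State.NORMAL
    DatanodeStorage s2 = new DatanodeStorage("DS-example", DatanodeStorage.State.NORMAL);
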
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageBlockReport.java

@@ -22,16 +22,16 @@ package org.apache.hadoop.hdfs.server.protocol;
  * Block report for a Datanode storage
  */
 public class StorageBlockReport {
-  private final String storageID;
+  private final DatanodeStorage storage;
   private final long[] blocks;
   
-  public StorageBlockReport(String sid, long[] blocks) {
-    this.storageID = sid;
+  public StorageBlockReport(DatanodeStorage storage, long[] blocks) {
+    this.storage = storage;
     this.blocks = blocks;
   }
 
-  public String getStorageID() {
-    return storageID;
+  public DatanodeStorage getStorage() {
+    return storage;
   }
 
   public long[] getBlocks() {

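Putting the two classes together, a block report is now built by wrapping the storage ID in a DatanodeStorage, as the BPServiceActor and test diffs do. A minimal sketch, assuming a hypothetical storage ID and an empty block list:

    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
    import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

    DatanodeStorage storage = new DatanodeStorage("DS-example");  // state defaults to NORMAL
    long[] blocks = new long[0];                                  // empty report, for illustration
    StorageBlockReport[] report = { new StorageBlockReport(storage, blocks) };
    // report[0].getStorage() now returns the DatanodeStorage, replacing getStorageID().
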
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto

@@ -149,7 +149,6 @@ message UpgradeCommandProto {
  */
 message RegisterDatanodeRequestProto {
   required DatanodeRegistrationProto registration = 1; // Datanode info
-  repeated DatanodeStorageProto storages = 2; // Storages on the datanode
 }
 
 /**
@@ -227,7 +226,7 @@ message BlockReportRequestProto {
  * Report of blocks in a storage
  */
 message StorageBlockReportProto {
-  required string storageID = 1;    // Storage ID
+  required DatanodeStorageProto storage = 1;    // Storage
   repeated uint64 blocks = 2 [packed=true];
 }
 

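This is the wire-format counterpart of the Java changes: registration no longer carries storages, and each per-storage block report embeds a DatanodeStorageProto. The PBHelper conversions used in the translator diffs run in both directions; a minimal round-trip sketch, assuming the storage ID is hypothetical and DatanodeStorageProto is generated into DatanodeProtocolProtos like the other messages in this diff:

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

    DatanodeStorageProto proto = PBHelper.convert(new DatanodeStorage("DS-example"));
    DatanodeStorage back = PBHelper.convert(proto);  // back to the server-side type
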
+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java

@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -36,7 +38,6 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
@@ -76,7 +77,7 @@ public class TestBPOfferService {
   private NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2];
   private int heartbeatCounts[] = new int[2];
   private DataNode mockDn;
-  private FSDatasetInterface mockFSDataset;
+  private FSDatasetInterface<?> mockFSDataset;
   
   @Before
   public void setupMocks() throws Exception {
@@ -114,8 +115,7 @@ public class TestBPOfferService {
       .when(mock).versionRequest();
     
     Mockito.doReturn(new DatanodeRegistration("fake-node"))
-      .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class),
-          Mockito.any(DatanodeStorage[].class));
+      .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
       .when(mock).sendHeartbeat(
@@ -161,10 +161,10 @@ public class TestBPOfferService {
       waitForInitialization(bpos);
       
       // The DN should have register to both NNs.
-      Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class),
-          Mockito.any(DatanodeStorage[].class));
-      Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class),
-          Mockito.any(DatanodeStorage[].class));
+      Mockito.verify(mockNN1).registerDatanode(
+          Mockito.any(DatanodeRegistration.class));
+      Mockito.verify(mockNN2).registerDatanode(
+          Mockito.any(DatanodeRegistration.class));
       
       // Should get block reports from both NNs
       waitForBlockReport(mockNN1);

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlo
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
@@ -138,8 +137,7 @@ public class TestBlockRecovery {
         return (DatanodeRegistration) invocation.getArguments()[0];
       }
     }).when(namenode).registerDatanode(
-        Mockito.any(DatanodeRegistration.class),
-        Mockito.any(DatanodeStorage[].class));
+        Mockito.any(DatanodeRegistration.class));
 
     when(namenode.versionRequest()).thenReturn(new NamespaceInfo
         (1, CLUSTER_ID, POOL_ID, 1L, 1));

+ 17 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -147,7 +148,8 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
@@ -228,7 +230,8 @@ public class TestBlockReport {
     // all blocks belong to the same file, hence same BP
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
@@ -269,7 +272,8 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     DatanodeCommand dnCmd =
       cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
@@ -322,7 +326,8 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
@@ -372,7 +377,8 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
@@ -395,7 +401,8 @@ public class TestBlockReport {
       LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
     }
     
-    report[0] = new StorageBlockReport(dnR.getStorageID(),
+    report[0] = new StorageBlockReport(
+        new DatanodeStorage(dnR.getStorageID()),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
     cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
@@ -446,7 +453,8 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+      StorageBlockReport[] report = { new StorageBlockReport(
+          new DatanodeStorage(dnR.getStorageID()),
           new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
       cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
@@ -493,7 +501,8 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+      StorageBlockReport[] report = { new StorageBlockReport(
+          new DatanodeStorage(dnR.getStorageID()),
           new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
       cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.net.NetUtils;
@@ -146,7 +147,8 @@ public class TestDataNodeVolumeFailure {
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
     final StorageBlockReport[] report = {
-        new StorageBlockReport(dnR.getStorageID(),
+        new StorageBlockReport(
+            new DatanodeStorage(dnR.getStorageID()),
             DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
                 ).getBlockListAsLongs())
     };

+ 13 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -31,6 +31,8 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -51,8 +53,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
@@ -612,7 +612,6 @@ public class NNThroughputBenchmark {
       super.parseArguments(args);
     }
 
-    @SuppressWarnings("deprecation")
     void generateInputs(int[] opsPerThread) throws IOException {
       // create files using opsPerThread
       String[] createArgs = new String[] {
@@ -742,7 +741,6 @@ public class NNThroughputBenchmark {
       }
     }
 
-    @SuppressWarnings("deprecation")
     long executeOp(int daemonId, int inputIdx, String ignore) 
     throws IOException {
       long start = System.currentTimeMillis();
@@ -762,6 +760,7 @@ public class NNThroughputBenchmark {
     
     NamespaceInfo nsInfo;
     DatanodeRegistration dnRegistration;
+    DatanodeStorage storage; //only one storage 
     ArrayList<Block> blocks;
     int nrBlocks; // actual number of blocks
     long[] blockReportList;
@@ -797,10 +796,15 @@ public class NNThroughputBenchmark {
       dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
-      
-      DatanodeStorage[] storages = { new DatanodeStorage(
-          dnRegistration.getStorageID(), DatanodeStorage.State.NORMAL) };
-      dnRegistration = nameNodeProto.registerDatanode(dnRegistration, storages);
+      dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
+      //first block reports
+      storage = new DatanodeStorage(dnRegistration.getStorageID());
+      final StorageBlockReport[] reports = {
+          new StorageBlockReport(storage,
+              new BlockListAsLongs(null, null).getBlockListAsLongs())
+      };
+      nameNodeProto.blockReport(dnRegistration, 
+          nameNode.getNamesystem().getBlockPoolId(), reports);
     }
 
     /**
@@ -1032,7 +1036,7 @@ public class NNThroughputBenchmark {
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
       StorageBlockReport[] report = { new StorageBlockReport(
-          dn.dnRegistration.getStorageID(), dn.getBlockReportList()) };
+          dn.storage, dn.getBlockReportList()) };
       nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
           .getBlockPoolId(), report);
       long end = System.currentTimeMillis();

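Note the compensating change here: since registration no longer announces storages, the benchmark's TinyDatanode now sends a first, empty block report right after registerDatanode so the NameNode learns of its single storage, and later reports reuse the same DatanodeStorage instance.
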
+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
@@ -125,7 +126,8 @@ public class TestDeadDatanode {
     }
 
     // Ensure blockReport from dead datanode is rejected with IOException
-    StorageBlockReport[] report = { new StorageBlockReport(reg.getStorageID(),
+    StorageBlockReport[] report = { new StorageBlockReport(
+        new DatanodeStorage(reg.getStorageID()),
         new long[] { 0L, 0L, 0L }) };
     try {
       dnp.blockReport(reg, poolId, report);