HDFS-2140. Move Host2NodesMap to the blockmanagement package.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1146514 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 14 years ago
parent commit 8327e70be8

+ 2 - 0
hdfs/CHANGES.txt

@@ -552,6 +552,8 @@ Trunk (unreleased changes)
     HDFS-2131. Add new tests for the -overwrite/-f option in put and
     copyFromLocal by HADOOP-7361.  (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2140. Move Host2NodesMap to the blockmanagement package.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

+ 5 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -221,6 +221,11 @@ public class BlockManager {
     datanodeManager.close();
   }
 
+  /** @return the datanodeManager */
+  public DatanodeManager getDatanodeManager() {
+    return datanodeManager;
+  }
+
   public void metaSave(PrintWriter out) {
     //
     // Dump contents of neededReplication

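Note: this accessor is the hinge of the whole move. Code that previously read FSNamesystem's private host2DataNodeMap field now reaches the map through BlockManager, as the FSNamesystem hunks below show. The resulting call chain, in a one-line sketch (clientMachine is a host-name string from the surrounding namesystem code):

    // Delegation chain introduced by this commit: the namesystem asks
    // BlockManager for its DatanodeManager, which owns the host index.
    final DatanodeDescriptor client =
        blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);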
+ 40 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.Daemon;
 
@@ -35,6 +38,8 @@ public class DatanodeManager {
   static final Log LOG = LogFactory.getLog(DatanodeManager.class);
 
   final FSNamesystem namesystem;
+
+  private final Host2NodesMap host2DatanodeMap = new Host2NodesMap();
   
   DatanodeManager(final FSNamesystem namesystem) {
     this.namesystem = namesystem;
@@ -54,4 +59,39 @@ public class DatanodeManager {
   void close() {
     if (decommissionthread != null) decommissionthread.interrupt();
   }
+
+  /** @return the datanode descriptor for the host. */
+  public DatanodeDescriptor getDatanodeByHost(final String host) {
+    return host2DatanodeMap.getDatanodeByHost(host);
+  }
+
+  /** Add a datanode. */
+  public void addDatanode(final DatanodeDescriptor node) {
+    // To keep host2DatanodeMap consistent with datanodeMap,
+    // remove  from host2DatanodeMap the datanodeDescriptor removed
+    // from datanodeMap before adding node to host2DatanodeMap.
+    synchronized (namesystem.datanodeMap) {
+      host2DatanodeMap.remove(
+          namesystem.datanodeMap.put(node.getStorageID(), node));
+    }
+    host2DatanodeMap.add(node);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(getClass().getSimpleName() + ".unprotectedAddDatanode: "
+          + "node " + node.getName() + " is added to datanodeMap.");
+    }
+  }
+
+  /** Physically remove node from datanodeMap. */
+  public void wipeDatanode(final DatanodeID node) throws IOException {
+    final String key = node.getStorageID();
+    synchronized (namesystem.datanodeMap) {
+      host2DatanodeMap.remove(namesystem.datanodeMap.remove(key));
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
+          + node.getName() + "): storage " + key 
+          + " is removed from datanodeMap.");
+    }
+  }
 }

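Note: the remove-before-add in addDatanode is the subtle part. HashMap.put returns the value previously stored under the key, so datanodeMap.put(node.getStorageID(), node) hands back the descriptor that a re-registering datanode displaces; purging that stale descriptor from host2DatanodeMap first keeps the two maps consistent when a node comes back under the same storage ID but a different host. A minimal standalone sketch of the pattern (class and field names are hypothetical, not the HDFS ones):

    import java.util.HashMap;
    import java.util.Map;

    class TwoMapRegistry {
      private final Map<String, String> byId = new HashMap<String, String>();   // storageID -> host
      private final Map<String, String> byHost = new HashMap<String, String>(); // host -> storageID

      synchronized void register(String storageId, String host) {
        // put() returns the mapping it displaced (or null), letting us
        // purge the secondary index before inserting the new entry.
        String previousHost = byId.put(storageId, host);
        if (previousHost != null) {
          byHost.remove(previousHost);
        }
        byHost.put(host, storageId);
      }
    }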
+ 6 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java

@@ -15,15 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.HashMap;
 import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 
+/** A map from host names to datanode descriptors. */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 class Host2NodesMap {
   private HashMap<String, DatanodeDescriptor[]> map
     = new HashMap<String, DatanodeDescriptor[]>();

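Note: the class keeps its original shape after the move: a HashMap from host name to an array of DatanodeDescriptors (several datanodes may share a host), guarded by the ReentrantReadWriteLock and Random visible in the imports. The diff does not show the method bodies, so the following read-path sketch is an assumption from those fields, with String standing in for DatanodeDescriptor to keep it self-contained:

    import java.util.HashMap;
    import java.util.Random;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class HostIndexSketch {
      // In Host2NodesMap the values are DatanodeDescriptor[].
      private final HashMap<String, String[]> map = new HashMap<String, String[]>();
      private final ReadWriteLock hostmapLock = new ReentrantReadWriteLock();
      private final Random r = new Random();

      /** Pick one node on the host under the read lock; a random choice
       *  spreads load when multiple nodes run on the same host. */
      String getByHost(String host) {
        hostmapLock.readLock().lock();
        try {
          String[] nodes = map.get(host);
          if (nodes == null || nodes.length == 0) {
            return null;
          }
          return nodes.length == 1 ? nodes[0] : nodes[r.nextInt(nodes.length)];
        } finally {
          hostmapLock.readLock().unlock();
        }
      }
    }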
+ 8 - 46
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -317,7 +317,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
       ReplaceDatanodeOnFailure.DEFAULT;
 
   private volatile SafeModeInfo safeMode;  // safe mode information
-  private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
     
   /** datanode network toplogy */
   public NetworkTopology clusterMap = new NetworkTopology();
@@ -878,8 +877,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true);
     if (blocks != null) {
       //sort the blocks
-      DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost(
-          clientMachine);
+      final DatanodeDescriptor client = 
+          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
       for (LocatedBlock b : blocks.getLocatedBlocks()) {
         clusterMap.pseudoSortByDistance(client, b.getLocations());
         
@@ -1490,8 +1489,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
         }
       }
 
-      DatanodeDescriptor clientNode = 
-        host2DataNodeMap.getDatanodeByHost(clientMachine);
+      final DatanodeDescriptor clientNode = 
+          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
 
       if (append && myFile != null) {
         //
@@ -2842,7 +2841,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
                                  + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = host2DataNodeMap.getDatanodeByName(nodeReg.getName());
+    DatanodeDescriptor nodeN =
+        blockManager.getDatanodeManager().getDatanodeByHost(nodeReg.getName());
       
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
@@ -2851,7 +2851,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
       // which is not served by anybody anymore.
       removeDatanode(nodeN);
       // physically remove node from datanodeMap
-      wipeDatanode(nodeN);
+      blockManager.getDatanodeManager().wipeDatanode(nodeN);
       nodeN = null;
     }
 
@@ -2918,7 +2918,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     DatanodeDescriptor nodeDescr 
       = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
     resolveNetworkLocation(nodeDescr);
-    unprotectedAddDatanode(nodeDescr);
+    blockManager.getDatanodeManager().addDatanode(nodeDescr);
     clusterMap.add(nodeDescr);
     checkDecommissioning(nodeDescr, dnAddress);
     
@@ -3356,44 +3356,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
           + nodeDescr.getName() + " is out of service now.");
     }
   }
-    
-  void unprotectedAddDatanode(DatanodeDescriptor nodeDescr) {
-    assert hasWriteLock();
-    // To keep host2DataNodeMap consistent with datanodeMap,
-    // remove  from host2DataNodeMap the datanodeDescriptor removed
-    // from datanodeMap before adding nodeDescr to host2DataNodeMap.
-    synchronized (datanodeMap) {
-      host2DataNodeMap.remove(
-                            datanodeMap.put(nodeDescr.getStorageID(), nodeDescr));
-    }
-    host2DataNodeMap.add(nodeDescr);
-      
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug(
-          "BLOCK* NameSystem.unprotectedAddDatanode: "
-          + "node " + nodeDescr.getName() + " is added to datanodeMap.");
-    }
-  }
-
-  /**
-   * Physically remove node from datanodeMap.
-   *
-   * @param nodeID node
-   * @throws IOException
-   */
-  void wipeDatanode(DatanodeID nodeID) throws IOException {
-    assert hasWriteLock();
-    String key = nodeID.getStorageID();
-    synchronized (datanodeMap) {
-      host2DataNodeMap.remove(datanodeMap.remove(key));
-    }
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug(
-          "BLOCK* NameSystem.wipeDatanode: "
-          + nodeID.getName() + " storage " + key 
-          + " is removed from datanodeMap.");
-    }
-  }
 
   FSImage getFSImage() {
     return dir.fsImage;

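Note: the two deleted bodies reappear almost verbatim in DatanodeManager above; only the assert hasWriteLock() guards and the NameNode.stateChangeLog logger did not make the trip. Condensed, the registration path that survives in FSNamesystem now reads (a sketch assembled from the hunks above, error handling elided):

    DatanodeDescriptor nodeDescr =
        new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
    resolveNetworkLocation(nodeDescr);
    blockManager.getDatanodeManager().addDatanode(nodeDescr); // was unprotectedAddDatanode
    clusterMap.add(nodeDescr);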
+ 2 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java

@@ -16,12 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
 
 public class TestHost2NodesMap extends TestCase {
   static private Host2NodesMap map = new Host2NodesMap();