
HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new package o.a.h.h.s.blockmanagement.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1140939 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 14 years ago
parent
commit
09b6f98de4
53 changed files with 489 additions and 350 deletions
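For code outside the moved classes, the refactoring is import-only: the types keep their names and call sites are unchanged, as the Balancer and JspHelper hunks below show. A minimal sketch of the adjustment a downstream class would make (ImportExample is hypothetical, not part of this commit):

    // Before HDFS-2107:
    //   import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy;
    // After HDFS-2107, the same types resolve from the new package:
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    public class ImportExample {
      // Field and method bodies need no changes; only the imports move.
      private BlockPlacementPolicy policy;
      private DatanodeDescriptor[] targets;
    }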
  1. + 3 - 0
      hdfs/CHANGES.txt
  2. + 2 - 2
      hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  3. + 14 - 13
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
  4. + 17 - 10
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
  5. + 103 - 73
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  6. + 6 - 4
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  7. + 5 - 2
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  8. + 6 - 5
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  9. + 2 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
  10. + 38 - 34
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  11. + 57 - 0
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java
  12. + 3 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
  13. + 6 - 5
      hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
  14. + 2 - 3
      hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  15. + 1 - 0
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java
  16. + 18 - 11
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  17. + 24 - 2
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  18. + 2 - 0
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  19. + 1 - 0
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  20. + 3 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
  21. + 34 - 68
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  22. + 4 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java
  23. + 6 - 4
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java
  24. + 9 - 5
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
  25. + 10 - 5
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
  26. + 1 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  27. + 1 - 1
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  28. + 3 - 3
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  29. + 4 - 4
      hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  30. + 8 - 3
      hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
  31. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
  32. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  33. + 8 - 6
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
  34. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
  35. + 2 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
  36. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
  37. + 9 - 9
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  38. + 4 - 3
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
  39. + 12 - 11
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
  40. + 1 - 0
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
  41. + 2 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
  42. + 2 - 0
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
  43. + 3 - 2
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
  44. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
  45. + 7 - 9
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
  46. + 3 - 2
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java
  47. + 6 - 6
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
  48. + 4 - 3
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
  49. + 5 - 4
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
  50. + 9 - 8
      hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
  51. + 1 - 1
      hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java
  52. + 13 - 7
      hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
  53. + 0 - 10
      hdfs/src/webapps/hdfs/block_info_xml.jsp

+ 3 - 0
hdfs/CHANGES.txt

@@ -534,6 +534,9 @@ Trunk (unreleased changes)
 
     HDFS-2110. StreamFile and ByteRangeInputStream cleanup. (eli)
 
+    HDFS-2107. Move block management code from o.a.h.h.s.namenode to a new
+    package o.a.h.h.s.blockmanagement.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

+ 2 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;

+ 14 - 13
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -15,16 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
 /**
  * Internal class for block metadata.
  */
-class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
+public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   private INodeFile inode;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
@@ -44,12 +45,12 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * Construct an entry for blocksmap
    * @param replication the block's replication factor
    */
-  protected BlockInfo(int replication) {
+  public BlockInfo(int replication) {
     this.triplets = new Object[3*replication];
     this.inode = null;
   }
   
-  protected BlockInfo(Block blk, int replication) {
+  public BlockInfo(Block blk, int replication) {
     super(blk);
     this.triplets = new Object[3*replication];
     this.inode = null;
@@ -65,11 +66,11 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
     this.inode = from.inode;
   }
 
-  INodeFile getINode() {
+  public INodeFile getINode() {
     return inode;
   }
 
-  void setINode(INodeFile inode) {
+  public void setINode(INodeFile inode) {
     this.inode = inode;
   }
 
@@ -162,7 +163,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   /**
    * Add data-node this block belongs to.
    */
-  boolean addNode(DatanodeDescriptor node) {
+  public boolean addNode(DatanodeDescriptor node) {
     if(findDatanode(node) >= 0) // the node is already there
       return false;
     // find the last null node
@@ -176,7 +177,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
   /**
    * Remove data-node from the block.
    */
-  boolean removeNode(DatanodeDescriptor node) {
+  public boolean removeNode(DatanodeDescriptor node) {
     int dnIndex = findDatanode(node);
     if(dnIndex < 0) // the node is not found
       return false;
@@ -218,7 +219,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * If the head is null then form a new list.
    * @return current block as the new head of the list.
    */
-  BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
+  public BlockInfo listInsert(BlockInfo head, DatanodeDescriptor dn) {
     int dnIndex = this.findDatanode(dn);
     assert dnIndex >= 0 : "Data node is not found: current";
     assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : 
@@ -238,7 +239,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * @return the new head of the list or null if the list becomes
    * empty after deletion.
    */
-  BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
+  public BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
     if(head == null)
       return null;
     int dnIndex = this.findDatanode(dn);
@@ -284,7 +285,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * to {@link BlockInfoUnderConstruction}.
    * @return {@link BlockUCState#COMPLETE}
    */
-  BlockUCState getBlockUCState() {
+  public BlockUCState getBlockUCState() {
     return BlockUCState.COMPLETE;
   }
 
@@ -293,7 +294,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * 
    * @return true if the state of the block is {@link BlockUCState#COMPLETE}
    */
-  boolean isComplete() {
+  public boolean isComplete() {
     return getBlockUCState().equals(BlockUCState.COMPLETE);
   }
 
@@ -302,7 +303,7 @@ class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * 
    * @return BlockInfoUnderConstruction -  an under construction block.
    */
-  BlockInfoUnderConstruction convertToBlockUnderConstruction(
+  public BlockInfoUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(

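The triplets array allocated as new Object[3*replication] above packs three references per replica; that layout is what lets listInsert and listRemove splice a block into each datanode's block list without allocating separate list nodes. A simplified standalone model (illustrative only; the slot layout is an assumption inferred from the getPrevious/getNext/findDatanode accessors in the hunks above):

    /** Simplified model of the BlockInfo triplets layout; not the real class. */
    class TripletBlock {
      // For replica i:
      //   triplets[3*i]     = the datanode storing replica i
      //   triplets[3*i + 1] = previous block in that datanode's block list
      //   triplets[3*i + 2] = next block in that datanode's block list
      final Object[] triplets;

      TripletBlock(int replication) {
        this.triplets = new Object[3 * replication];
      }

      Object getDatanode(int i)       { return triplets[3*i]; }
      TripletBlock getPrevious(int i) { return (TripletBlock) triplets[3*i + 1]; }
      TripletBlock getNext(int i)     { return (TripletBlock) triplets[3*i + 2]; }
    }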
+ 17 - 10
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -24,12 +24,13 @@ import java.util.List;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-class BlockInfoUnderConstruction extends BlockInfo {
+public class BlockInfoUnderConstruction extends BlockInfo {
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
 
@@ -128,11 +129,14 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Create block and set its state to
    * {@link BlockUCState#UNDER_CONSTRUCTION}.
    */
-  BlockInfoUnderConstruction(Block blk, int replication) {
+  public BlockInfoUnderConstruction(Block blk, int replication) {
     this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
   }
 
-  BlockInfoUnderConstruction(Block blk, int replication,
+  /**
+   * Create a block that is currently being constructed.
+   */
+  public BlockInfoUnderConstruction(Block blk, int replication,
                              BlockUCState state,
                              DatanodeDescriptor[] targets) {
     super(blk, replication);
@@ -160,7 +164,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     return new BlockInfo(this);
   }
 
-  void setExpectedLocations(DatanodeDescriptor[] targets) {
+  /** Set expected locations */
+  public void setExpectedLocations(DatanodeDescriptor[] targets) {
     int numLocations = targets == null ? 0 : targets.length;
     this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
     for(int i = 0; i < numLocations; i++)
@@ -172,7 +177,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Create array of expected replica locations
    * (as has been assigned by chooseTargets()).
    */
-  DatanodeDescriptor[] getExpectedLocations() {
+  public DatanodeDescriptor[] getExpectedLocations() {
     int numLocations = replicas == null ? 0 : replicas.size();
     DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocations];
     for(int i = 0; i < numLocations; i++)
@@ -180,7 +185,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     return locations;
   }
 
-  int getNumExpectedLocations() {
+  /** Get the number of expected locations */
+  public int getNumExpectedLocations() {
     return replicas == null ? 0 : replicas.size();
   }
 
@@ -189,7 +195,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * @see BlockUCState
    */
   @Override // BlockInfo
-  BlockUCState getBlockUCState() {
+  public BlockUCState getBlockUCState() {
     return blockUCState;
   }
 
@@ -197,7 +203,8 @@ class BlockInfoUnderConstruction extends BlockInfo {
     blockUCState = s;
   }
 
-  long getBlockRecoveryId() {
+  /** Get block recovery ID */
+  public long getBlockRecoveryId() {
     return blockRecoveryId;
   }
 
@@ -220,7 +227,7 @@ class BlockInfoUnderConstruction extends BlockInfo {
    * Find the first alive data-node starting from the previous primary and
    * make it primary.
    */
-  void initializeBlockRecovery(long recoveryId) {
+  public void initializeBlockRecovery(long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {

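Since the constructors and accessors are now public, code in other packages can drive the under-construction lifecycle directly. A hedged sketch against the API visible in this hunk (blk, replication, and targets are placeholders):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;

    class UnderConstructionExample {
      static BlockInfoUnderConstruction startBlock(Block blk, int replication,
          DatanodeDescriptor[] targets) {
        // State starts as UNDER_CONSTRUCTION (the two-arg constructor above
        // chains to the four-arg one with that state).
        BlockInfoUnderConstruction uc =
            new BlockInfoUnderConstruction(blk, replication);
        uc.setExpectedLocations(targets);  // now public, callable from other packages
        assert uc.getBlockUCState() == BlockUCState.UNDER_CONSTRUCTION;
        return uc;
      }
    }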
+ 103 - 73
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.io.PrintWriter;
@@ -39,10 +39,14 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
-import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
@@ -57,18 +61,43 @@ public class BlockManager {
 
   private final FSNamesystem namesystem;
 
-  volatile long pendingReplicationBlocksCount = 0L;
-  volatile long corruptReplicaBlocksCount = 0L;
-  volatile long underReplicatedBlocksCount = 0L;
-  volatile long scheduledReplicationBlocksCount = 0L;
-  volatile long excessBlocksCount = 0L;
-  volatile long pendingDeletionBlocksCount = 0L;
+  private volatile long pendingReplicationBlocksCount = 0L;
+  private volatile long corruptReplicaBlocksCount = 0L;
+  private volatile long underReplicatedBlocksCount = 0L;
+  public volatile long scheduledReplicationBlocksCount = 0L;
+  private volatile long excessBlocksCount = 0L;
+  private volatile long pendingDeletionBlocksCount = 0L;
 
-  //
-  // Mapping: Block -> { INode, datanodes, self ref }
-  // Updated only in response to client-sent information.
-  //
-  final BlocksMap blocksMap;
+  /** Used by metrics */
+  public long getPendingReplicationBlocksCount() {
+    return pendingReplicationBlocksCount;
+  }
+  /** Used by metrics */
+  public long getUnderReplicatedBlocksCount() {
+    return underReplicatedBlocksCount;
+  }
+  /** Used by metrics */
+  public long getCorruptReplicaBlocksCount() {
+    return corruptReplicaBlocksCount;
+  }
+  /** Used by metrics */
+  public long getScheduledReplicationBlocksCount() {
+    return scheduledReplicationBlocksCount;
+  }
+  /** Used by metrics */
+  public long getPendingDeletionBlocksCount() {
+    return pendingDeletionBlocksCount;
+  }
+  /** Used by metrics */
+  public long getExcessBlocksCount() {
+    return excessBlocksCount;
+  }
+
+  /**
+   * Mapping: Block -> { INode, datanodes, self ref }
+   * Updated only in response to client-sent information.
+   */
+  public final BlocksMap blocksMap;
 
   //
   // Store blocks-->datanodedescriptor(s) map of corrupt replicas
@@ -90,24 +119,24 @@ public class BlockManager {
   // eventually remove these extras.
   // Mapping: StorageID -> TreeSet<Block>
   //
-  Map<String, Collection<Block>> excessReplicateMap =
+  public final Map<String, Collection<Block>> excessReplicateMap =
     new TreeMap<String, Collection<Block>>();
 
   //
   // Store set of Blocks that need to be replicated 1 or more times.
   // We also store pending replication-orders.
   //
-  UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+  public UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
   private PendingReplicationBlocks pendingReplications;
 
   //  The maximum number of replicas allowed for a block
-  int maxReplication;
+  public int maxReplication;
   //  How many outgoing replication streams a given node should have at one time
-  int maxReplicationStreams;
+  public int maxReplicationStreams;
   // Minimum copies needed or else write is disallowed
-  int minReplication;
+  public int minReplication;
   // Default number of replicas
-  int defaultReplication;
+  public int defaultReplication;
   // How many entries are returned by getCorruptInodes()
   int maxCorruptFilesReturned;
   
@@ -121,9 +150,9 @@ public class BlockManager {
   Random r = new Random();
 
   // for block replicas placement
-  BlockPlacementPolicy replicator;
+  public BlockPlacementPolicy replicator;
 
-  BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
+  public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
     this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY);
   }
   
@@ -178,16 +207,16 @@ public class BlockManager {
     FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
   }
 
-  void activate() {
+  public void activate() {
     pendingReplications.start();
   }
 
-  void close() {
+  public void close() {
     if (pendingReplications != null) pendingReplications.stop();
     blocksMap.close();
   }
 
-  void metaSave(PrintWriter out) {
+  public void metaSave(PrintWriter out) {
     //
     // Dump contents of neededReplication
     //
@@ -249,7 +278,7 @@ public class BlockManager {
    * @param block
    * @return true if the block has minimum replicas
    */
-  boolean checkMinReplication(Block block) {
+  public boolean checkMinReplication(Block block) {
     return (countNodes(block).liveReplicas() >= minReplication);
   }
 
@@ -297,7 +326,7 @@ public class BlockManager {
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, 
+  public void commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, 
       Block commitBlock) throws IOException {
     
     if(commitBlock == null)
@@ -362,7 +391,7 @@ public class BlockManager {
    * @param fileINode file
    * @return the last block locations if the block is partial or null otherwise
    */
-  LocatedBlock convertLastBlockToUnderConstruction(
+  public LocatedBlock convertLastBlockToUnderConstruction(
       INodeFileUnderConstruction fileINode) throws IOException {
     BlockInfo oldBlock = fileINode.getLastBlock();
     if(oldBlock == null ||
@@ -393,7 +422,7 @@ public class BlockManager {
   /**
    * Get all valid locations of the block
    */
-  ArrayList<String> getValidLocations(Block block) {
+  public ArrayList<String> getValidLocations(Block block) {
     ArrayList<String> machineSet =
       new ArrayList<String>(blocksMap.numNodes(block));
     for(Iterator<DatanodeDescriptor> it =
@@ -407,7 +436,7 @@ public class BlockManager {
     return machineSet;
   }
 
-  List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
+  public List<LocatedBlock> getBlockLocations(BlockInfo[] blocks, long offset,
       long length, int nrBlocksToReturn) throws IOException {
     int curBlk = 0;
     long curPos = 0, blkSize = 0;
@@ -436,11 +465,15 @@ public class BlockManager {
     return results;
   }
 
-  /** @param needBlockToken 
-   * @return a LocatedBlock for the given block */
-  LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
+  /** @return a LocatedBlock for the given block */
+  public LocatedBlock getBlockLocation(final BlockInfo blk, final long pos
       ) throws IOException {
-    if (!blk.isComplete()) {
+    if (blk instanceof BlockInfoUnderConstruction) {
+      if (blk.isComplete()) {
+        throw new IOException(
+            "blk instanceof BlockInfoUnderConstruction && blk.isComplete()"
+            + ", blk=" + blk);
+      }
       final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)blk;
       final DatanodeDescriptor[] locations = uc.getExpectedLocations();
       return namesystem.createLocatedBlock(uc, locations, pos, false);
@@ -476,7 +509,7 @@ public class BlockManager {
    * Check whether the replication parameter is within the range
    * determined by system configuration.
    */
-   void verifyReplication(String src,
+   public void verifyReplication(String src,
                           short replication,
                           String clientName) throws IOException {
 
@@ -544,7 +577,7 @@ public class BlockManager {
    * @param b block
    * @param dn datanode
    */
-  void addToInvalidates(Block b, DatanodeInfo dn) {
+  public void addToInvalidates(Block b, DatanodeInfo dn) {
     addToInvalidates(b, dn, true);
   }
 
@@ -585,7 +618,7 @@ public class BlockManager {
     }
   }
 
-  void findAndMarkBlockAsCorrupt(Block blk,
+  public void findAndMarkBlockAsCorrupt(Block blk,
                                  DatanodeInfo dn) throws IOException {
     BlockInfo storedBlock = getStoredBlock(blk);
     if (storedBlock == null) {
@@ -668,14 +701,14 @@ public class BlockManager {
     }
   }
 
-  void updateState() {
+  public void updateState() {
     pendingReplicationBlocksCount = pendingReplications.size();
     underReplicatedBlocksCount = neededReplications.size();
     corruptReplicaBlocksCount = corruptReplicas.size();
   }
 
   /** Return number of under-replicated but not missing blocks */
-  int getUnderReplicatedNotMissingBlocks() {
+  public int getUnderReplicatedNotMissingBlocks() {
     return neededReplications.getUnderReplicatedBlockCount();
   }
   
@@ -684,7 +717,7 @@ public class BlockManager {
    * @param nodesToProcess number of datanodes to schedule deletion work
    * @return total number of block for deletion
    */
-  int computeInvalidateWork(int nodesToProcess) {
+  public int computeInvalidateWork(int nodesToProcess) {
     int numOfNodes = recentInvalidateSets.size();
     nodesToProcess = Math.min(numOfNodes, nodesToProcess);
 
@@ -724,7 +757,7 @@ public class BlockManager {
    *
    * @return number of blocks scheduled for replication during this iteration.
    */
-  int computeReplicationWork(int blocksToProcess) throws IOException {
+  public int computeReplicationWork(int blocksToProcess) throws IOException {
     // Choose the blocks to be replicated
     List<List<Block>> blocksToReplicate =
       chooseUnderReplicatedBlocks(blocksToProcess);
@@ -1031,7 +1064,7 @@ public class BlockManager {
    * If there were any replication requests that timed out, reap them
    * and put them back into the neededReplication queue
    */
-  void processPendingReplications() {
+  public void processPendingReplications() {
     Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();
@@ -1464,7 +1497,7 @@ public class BlockManager {
     short fileReplication = fileINode.getReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
-          num.decommissionedReplicas, fileReplication);
+          num.decommissionedReplicas(), fileReplication);
     } else {
       updateNeededReplications(storedBlock, curReplicaDelta, 0);
     }
@@ -1525,7 +1558,7 @@ public class BlockManager {
    * For each block in the name-node verify whether it belongs to any file,
    * over or under replicated. Place it into the respective queue.
    */
-  void processMisReplicatedBlocks() {
+  public void processMisReplicatedBlocks() {
     long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0;
     namesystem.writeLock();
     try {
@@ -1570,7 +1603,7 @@ public class BlockManager {
    * If there are any extras, call chooseExcessReplicates() to
    * mark them in the excessReplicateMap.
    */
-  void processOverReplicatedBlock(Block block, short replication,
+  public void processOverReplicatedBlock(Block block, short replication,
       DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
     assert namesystem.hasWriteLock();
     if (addedNode == delNodeHint) {
@@ -1597,7 +1630,7 @@ public class BlockManager {
         addedNode, delNodeHint, replicator);
   }
 
-  void addToExcessReplicate(DatanodeInfo dn, Block block) {
+  public void addToExcessReplicate(DatanodeInfo dn, Block block) {
     assert namesystem.hasWriteLock();
     Collection<Block> excessBlocks = excessReplicateMap.get(dn.getStorageID());
     if (excessBlocks == null) {
@@ -1618,7 +1651,7 @@ public class BlockManager {
    * Modify (block-->datanode) map. Possibly generate replication tasks, if the
    * removed block is still valid.
    */
-  void removeStoredBlock(Block block, DatanodeDescriptor node) {
+  public void removeStoredBlock(Block block, DatanodeDescriptor node) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
           + block + " from " + node.getName());
@@ -1673,7 +1706,7 @@ public class BlockManager {
   /**
    * The given node is reporting that it received a certain block.
    */
-  void addBlock(DatanodeDescriptor node, Block block, String delHint)
+  public void addBlock(DatanodeDescriptor node, Block block, String delHint)
       throws IOException {
     // decrement number of blocks scheduled to this datanode.
     node.decBlocksScheduled();
@@ -1726,7 +1759,7 @@ public class BlockManager {
   /**
    * Return the number of nodes that are live and decommissioned.
    */
-  NumberReplicas countNodes(Block b) {
+  public NumberReplicas countNodes(Block b) {
     int count = 0;
     int live = 0;
     int corrupt = 0;
@@ -1805,7 +1838,7 @@ public class BlockManager {
    * Return true if there are any blocks on this node that have not
    * yet reached their replication factor. Otherwise returns false.
    */
-  boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
+  public boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
     boolean status = false;
     int underReplicatedBlocks = 0;
     int decommissionOnlyReplicas = 0;
@@ -1855,11 +1888,11 @@ public class BlockManager {
     return status;
   }
 
-  int getActiveBlockCount() {
+  public int getActiveBlockCount() {
     return blocksMap.size() - (int)pendingDeletionBlocksCount;
   }
 
-  DatanodeDescriptor[] getNodes(BlockInfo block) {
+  public DatanodeDescriptor[] getNodes(BlockInfo block) {
     DatanodeDescriptor[] nodes =
       new DatanodeDescriptor[block.numNodes()];
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
@@ -1869,22 +1902,22 @@ public class BlockManager {
     return nodes;
   }
 
-  int getTotalBlocks() {
+  public int getTotalBlocks() {
     return blocksMap.size();
   }
 
-  void removeBlock(Block block) {
+  public void removeBlock(Block block) {
     addToInvalidates(block);
     corruptReplicas.removeFromCorruptReplicasMap(block);
     blocksMap.removeBlock(block);
   }
 
-  BlockInfo getStoredBlock(Block block) {
+  public BlockInfo getStoredBlock(Block block) {
     return blocksMap.getStoredBlock(block);
   }
 
   /* updates a block in under replication queue */
-  void updateNeededReplications(Block block, int curReplicasDelta,
+  public void updateNeededReplications(Block block, int curReplicasDelta,
       int expectedReplicasDelta) {
     namesystem.writeLock();
     try {
@@ -1905,13 +1938,13 @@ public class BlockManager {
     }
   }
 
-  void checkReplication(Block block, int numExpectedReplicas) {
+  public void checkReplication(Block block, int numExpectedReplicas) {
     // filter out containingNodes that are marked for decommission.
     NumberReplicas number = countNodes(block);
     if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) { 
       neededReplications.add(block,
                              number.liveReplicas(),
-                             number.decommissionedReplicas,
+                             number.decommissionedReplicas(),
                              numExpectedReplicas);
     }
   }
@@ -1926,11 +1959,8 @@ public class BlockManager {
     return fileINode.getReplication();
   }
 
-  /**
-   * Remove a datanode from the invalidatesSet
-   * @param n datanode
-   */
-  void removeFromInvalidates(String storageID) {
+  /** Remove a datanode from the invalidatesSet */
+  public void removeFromInvalidates(String storageID) {
     Collection<Block> blocks = recentInvalidateSets.remove(storageID);
     if (blocks != null) {
       pendingDeletionBlocksCount -= blocks.size();
@@ -1998,7 +2028,7 @@ public class BlockManager {
   //Returns the number of racks over which a given block is replicated
   //decommissioning/decommissioned nodes are not counted. corrupt replicas 
   //are also ignored
-  int getNumberOfRacks(Block b) {
+  public int getNumberOfRacks(Block b) {
     HashSet<String> rackSet = new HashSet<String>(0);
     Collection<DatanodeDescriptor> corruptNodes = 
                                   corruptReplicas.getNodes(b);
@@ -2056,32 +2086,32 @@ public class BlockManager {
     }
   }
   
-  long getMissingBlocksCount() {
+  public long getMissingBlocksCount() {
     // not locking
     return this.neededReplications.getCorruptBlockSize();
   }
 
-  BlockInfo addINode(BlockInfo block, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
     return blocksMap.addINode(block, iNode);
   }
 
-  INodeFile getINode(Block b) {
+  public INodeFile getINode(Block b) {
     return blocksMap.getINode(b);
   }
 
-  void removeFromCorruptReplicasMap(Block block) {
+  public void removeFromCorruptReplicasMap(Block block) {
     corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
-  int numCorruptReplicas(Block block) {
+  public int numCorruptReplicas(Block block) {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  void removeBlockFromMap(Block block) {
+  public void removeBlockFromMap(Block block) {
     blocksMap.removeBlock(block);
   }
 
-  int getCapacity() {
+  public int getCapacity() {
     namesystem.readLock();
     try {
       return blocksMap.getCapacity();
@@ -2104,7 +2134,7 @@ public class BlockManager {
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    *
    */
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  public long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
                                    Long startingBlockId) {
     return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
                                                      startingBlockId);
@@ -2113,7 +2143,7 @@ public class BlockManager {
   /**
    * Return an iterator over the set of blocks for which there are no replicas.
    */
-  UnderReplicatedBlocks.BlockIterator getCorruptReplicaBlockIterator() {
+  public BlockIterator getCorruptReplicaBlockIterator() {
     return neededReplications
         .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
   }

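The replication counters that used to be package-private volatile fields are now read through the public "Used by metrics" getters introduced above. A hypothetical metrics read against the new API (MetricsExample is not part of this commit):

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

    class MetricsExample {
      static void report(BlockManager bm) {
        // The getters replace direct access to the formerly package-private fields.
        long pending = bm.getPendingReplicationBlocksCount();
        long under   = bm.getUnderReplicatedBlocksCount();
        long corrupt = bm.getCorruptReplicaBlocksCount();
        System.out.println("pending=" + pending + " under=" + under
            + " corrupt=" + corrupt);
      }
    }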
+ 6 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -26,6 +26,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
+import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -94,7 +96,7 @@ public abstract class BlockPlacementPolicy {
    * @return array of DatanodeDescriptor instances chosen as target
    * and sorted as a pipeline.
    */
-  abstract DatanodeDescriptor[] chooseTarget(String srcPath,
+  public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
                                              int numOfReplicas,
                                              DatanodeDescriptor writer,
                                              List<DatanodeDescriptor> chosenNodes,
@@ -222,11 +224,11 @@ public abstract class BlockPlacementPolicy {
    * @param numOfReplicas number of replicas wanted.
    * @param writer the writer's machine, null if not in the cluster.
    * @param blocksize size of the data to be written.
-   * @param excludedNodes: datanodes that should not be considered as targets.
+   * @param excludedNodes datanodes that should not be considered as targets.
    * @return array of DatanodeDescriptor instances chosen as targets
    * and sorted as a pipeline.
    */
-  DatanodeDescriptor[] chooseTarget(String srcPath,
+  public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     HashMap<Node, Node> excludedNodes,

+ 5 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -32,6 +32,9 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
+import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
@@ -89,7 +92,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
   /** {@inheritDoc} */
   @Override
-  DatanodeDescriptor[] chooseTarget(String srcPath,
+  public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     List<DatanodeDescriptor> chosenNodes,

+ 6 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -15,11 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.Iterator;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.GSet;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
 
@@ -28,7 +29,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
  * block's metadata currently includes INode it belongs to and
  * the datanodes that store the block.
  */
-class BlocksMap {
+public class BlocksMap {
   private static class NodeIterator implements Iterator<DatanodeDescriptor> {
     private BlockInfo blockInfo;
     private int nextIdx = 0;
@@ -100,7 +101,7 @@ class BlocksMap {
   /**
    * Add block b belonging to the specified file inode to the map.
    */
-  BlockInfo addINode(BlockInfo b, INodeFile iNode) {
+  public BlockInfo addINode(BlockInfo b, INodeFile iNode) {
     BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;
@@ -136,7 +137,7 @@ class BlocksMap {
    * Searches for the block in the BlocksMap and 
    * returns Iterator that iterates through the nodes the block belongs to.
    */
-  Iterator<DatanodeDescriptor> nodeIterator(Block b) {
+  public Iterator<DatanodeDescriptor> nodeIterator(Block b) {
     return nodeIterator(blocks.get(b));
   }
 
@@ -185,7 +186,7 @@ class BlocksMap {
   /**
    * Check if the block exists in map
    */
-  boolean contains(Block block) {
+  public boolean contains(Block block) {
     return blocks.contains(block);
   }
   

+ 2 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java

@@ -15,10 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
 import java.util.*;

+ 38 - 34
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.DataInput;
 import java.io.IOException;
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
@@ -44,7 +45,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
   
   /** Block and targets pair */
   @InterfaceAudience.Private
@@ -96,8 +97,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private int numBlocks = 0;
   // isAlive == heartbeats.contains(this)
   // This is an optimization, because contains takes O(n) time on Arraylist
-  protected boolean isAlive = false;
-  protected boolean needKeyUpdate = false;
+  public boolean isAlive = false;
+  public boolean needKeyUpdate = false;
 
   /** A queue of blocks to be replicated by this datanode */
   private BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
@@ -204,7 +205,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Add datanode to the block.
    * Add block to the head of the list of blocks belonging to the data-node.
    */
-  boolean addBlock(BlockInfo b) {
+  public boolean addBlock(BlockInfo b) {
     if(!b.addNode(this))
       return false;
     // add to the head of the data-node list
@@ -217,7 +218,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Remove block from the list of blocks belonging to the data-node.
    * Remove datanode from the block.
    */
-  boolean removeBlock(BlockInfo b) {
+  public boolean removeBlock(BlockInfo b) {
     blockList = b.listRemove(blockList, this);
     if ( b.removeNode(this) ) {
       numBlocks--;
@@ -242,7 +243,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param newBlock - a replacement block
    * @return the new block
    */
-  BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
+  public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
     boolean done = removeBlock(oldBlock);
     assert done : "Old block should belong to the data-node when replacing";
     done = addBlock(newBlock);
@@ -250,7 +251,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     return newBlock;
   }
 
-  void resetBlocks() {
+  public void resetBlocks() {
     this.capacity = 0;
     this.remaining = 0;
     this.blockPoolUsed = 0;
@@ -268,7 +269,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Updates stats from datanode heartbeat.
    */
-  void updateHeartbeat(long capacity, long dfsUsed, long remaining,
+  public void updateHeartbeat(long capacity, long dfsUsed, long remaining,
       long blockPoolUsed, int xceiverCount, int volFailures) {
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
@@ -283,7 +284,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Iterates over the list of blocks belonging to the datanode.
    */
-  static class BlockIterator implements Iterator<BlockInfo> {
+  public static class BlockIterator implements Iterator<BlockInfo> {
     private BlockInfo current;
     private DatanodeDescriptor node;
       
@@ -307,7 +308,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
-  Iterator<BlockInfo> getBlockIterator() {
+  public Iterator<BlockInfo> getBlockIterator() {
     return new BlockIterator(this.blockList, this);
   }
   
@@ -361,11 +362,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
   
-  List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
+  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
     return replicateBlocks.poll(maxTransfers);
   }
 
-  BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
+  public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
     List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
     if(blocks == null)
       return null;
@@ -375,7 +376,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Remove the specified number of blocks to be invalidated
    */
-  Block[] getInvalidateBlocks(int maxblocks) {
+  public Block[] getInvalidateBlocks(int maxblocks) {
     return getBlockArray(invalidateBlocks, maxblocks); 
   }
 
@@ -418,7 +419,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /** Serialization for FSEditLog */
-  void readFieldsFromFSEditLog(DataInput in) throws IOException {
+  public void readFieldsFromFSEditLog(DataInput in) throws IOException {
     this.name = DeprecatedUTF8.readString(in);
     this.storageID = DeprecatedUTF8.readString(in);
     this.infoPort = in.readShort() & 0x0000ffff;
@@ -445,7 +446,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * Increments counter for number of blocks scheduled. 
    */
-  void incBlocksScheduled() {
+  public void incBlocksScheduled() {
     currApproxBlocksScheduled++;
   }
   
@@ -485,12 +486,13 @@ public class DatanodeDescriptor extends DatanodeInfo {
     // by DatanodeID
     return (this == obj) || super.equals(obj);
   }
-  
-  class DecommissioningStatus {
-    int underReplicatedBlocks;
-    int decommissionOnlyReplicas;
-    int underReplicatedInOpenFiles;
-    long startTime;
+
+  /** Decommissioning status */
+  public class DecommissioningStatus {
+    private int underReplicatedBlocks;
+    private int decommissionOnlyReplicas;
+    private int underReplicatedInOpenFiles;
+    private long startTime;
     
     synchronized void set(int underRep,
         int onlyRep, int underConstruction) {
@@ -501,32 +503,34 @@ public class DatanodeDescriptor extends DatanodeInfo {
       decommissionOnlyReplicas = onlyRep;
       underReplicatedInOpenFiles = underConstruction;
     }
-    
-    synchronized int getUnderReplicatedBlocks() {
+
+    /** @return the number of under-replicated blocks */
+    public synchronized int getUnderReplicatedBlocks() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
       return underReplicatedBlocks;
     }
-    synchronized int getDecommissionOnlyReplicas() {
+    /** @return the number of decommission-only replicas */
+    public synchronized int getDecommissionOnlyReplicas() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
       return decommissionOnlyReplicas;
     }
-
-    synchronized int getUnderReplicatedInOpenFiles() {
+    /** @return the number of under-replicated blocks in open files */
+    public synchronized int getUnderReplicatedInOpenFiles() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
       return underReplicatedInOpenFiles;
     }
-
-    synchronized void setStartTime(long time) {
+    /** Set start time */
+    public synchronized void setStartTime(long time) {
       startTime = time;
     }
-    
-    synchronized long getStartTime() {
+    /** @return start time */
+    public synchronized long getStartTime() {
       if (isDecommissionInProgress() == false) {
         return 0;
       }
@@ -538,11 +542,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Set the flag to indicate if this datanode is disallowed from communicating
    * with the namenode.
    */
-  void setDisallowed(boolean flag) {
+  public void setDisallowed(boolean flag) {
     disallowed = flag;
   }
-  
-  boolean isDisallowed() {
+  /** Is the datanode disallowed from communicating with the namenode? */
+  public boolean isDisallowed() {
     return disallowed;
   }
 

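With getBlockIterator() public as well, code outside the package can walk a datanode's block list (the list threaded through the BlockInfo triplets). A hypothetical sketch:

    import java.util.Iterator;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    class BlockWalkExample {
      static int countBlocks(DatanodeDescriptor dn) {
        int n = 0;
        // Iterates the per-datanode list without copying it into a collection.
        for (Iterator<BlockInfo> it = dn.getBlockIterator(); it.hasNext(); it.next()) {
          n++;
        }
        return n;
      }
    }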
+ 57 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+/**
+ * An immutable object that stores the number of live replicas and
+ * the number of decommissioned replicas.
+ */
+public class NumberReplicas {
+  private int liveReplicas;
+  private int decommissionedReplicas;
+  private int corruptReplicas;
+  private int excessReplicas;
+
+  NumberReplicas() {
+    initialize(0, 0, 0, 0);
+  }
+
+  NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
+    initialize(live, decommissioned, corrupt, excess);
+  }
+
+  void initialize(int live, int decommissioned, int corrupt, int excess) {
+    liveReplicas = live;
+    decommissionedReplicas = decommissioned;
+    corruptReplicas = corrupt;
+    excessReplicas = excess;
+  }
+
+  public int liveReplicas() {
+    return liveReplicas;
+  }
+  public int decommissionedReplicas() {
+    return decommissionedReplicas;
+  }
+  public int corruptReplicas() {
+    return corruptReplicas;
+  }
+  public int excessReplicas() {
+    return excessReplicas;
+  }
+} 

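NumberReplicas is the value object returned by BlockManager.countNodes(Block). A hypothetical caller, mirroring how checkReplication() in the BlockManager hunk consumes the counts:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
    import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;

    class ReplicaCountExample {
      static boolean needsMoreReplicas(BlockManager bm, Block b, int expected) {
        NumberReplicas num = bm.countNodes(b);
        // Only live replicas count toward the target; corrupt and excess do not.
        return num.liveReplicas() < expected;
      }
    }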
+ 3 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/PendingReplicationBlocks.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java

@@ -15,9 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.util.*;
 import java.io.*;

+ 6 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java → hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java

@@ -15,17 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.*;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 /* Class for keeping track of under-replicated blocks.
  * Blocks have replication priority, with priority 0 indicating the highest;
  * blocks with only one replica get the highest priority.
  */
-class UnderReplicatedBlocks implements Iterable<Block> {
+public class UnderReplicatedBlocks implements Iterable<Block> {
   static final int LEVEL = 5;
   static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
   private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
@@ -47,7 +48,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
   }
 
   /* Return the total number of under replication blocks */
-  synchronized int size() {
+  public synchronized int size() {
     int size = 0;
     for (int i=0; i<LEVEL; i++) {
       size += priorityQueues.get(i).size();
@@ -70,7 +71,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
   }
   
   /* Check if a block is in the neededReplication queue */
-  synchronized boolean contains(Block block) {
+  public synchronized boolean contains(Block block) {
     for(TreeSet<Block> set:priorityQueues) {
       if(set.contains(block)) { return true; }
     }
@@ -218,7 +219,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
     return new BlockIterator();
   }
   
-  class BlockIterator implements Iterator<Block> {
+  public class BlockIterator implements Iterator<Block> {
     private int level;
     private boolean isIteratorForLevel = false;
     private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();

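Internally the class keeps LEVEL (5) TreeSets, with index 0 the most urgent and index QUEUE_WITH_CORRUPT_BLOCKS (4) holding blocks with no usable replicas (cf. getCorruptReplicaBlockIterator in the BlockManager hunk). A simplified model of the bucket structure (illustrative; the real class computes a block's level from its replica counts):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeSet;
    import org.apache.hadoop.hdfs.protocol.Block;

    class PriorityBucketsModel {
      static final int LEVEL = 5;                 // 0 = highest priority
      static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
      private final List<TreeSet<Block>> queues = new ArrayList<TreeSet<Block>>();

      PriorityBucketsModel() {
        for (int i = 0; i < LEVEL; i++) {
          queues.add(new TreeSet<Block>());
        }
      }

      synchronized boolean add(Block b, int level) {
        return queues.get(level).add(b);
      }

      synchronized int size() {                   // same shape as size() above
        int n = 0;
        for (int i = 0; i < LEVEL; i++) {
          n += queues.get(i).size();
        }
        return n;
      }
    }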
+ 2 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -26,8 +26,8 @@ import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.URL;
 import java.net.URLEncoder;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -45,14 +45,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.io.Text;

+ 1 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DecommissionManager.java

@@ -21,6 +21,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.util.CyclicIteration;
 
 /**

+ 18 - 11
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
+
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -24,35 +26,40 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.locks.Condition;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
-import org.apache.hadoop.hdfs.protocol.FSLimitException.*;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.util.ByteArray;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.
@@ -63,7 +70,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
  * and logged to disk.
  * 
  *************************************************/
-class FSDirectory implements Closeable {
+public class FSDirectory implements Closeable {
 
   INodeDirectoryWithQuota rootDir;
   FSImage fsImage;  
@@ -1332,7 +1339,7 @@ class FSDirectory implements Closeable {
    * @throws QuotaExceededException if the new count violates any quota limit
    * @throws FileNotFound if path does not exist.
    */
-  void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
+  public void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
                                          throws QuotaExceededException,
                                                 FileNotFoundException,
                                                 UnresolvedLinkException {
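updateSpaceConsumed becomes public so block-management code can adjust quota
usage when replicas are added or dropped. An illustrative call with made-up
path and deltas (nsDelta counts namespace items, dsDelta counts bytes of
disk space; a negative delta releases quota):

    fsDirectory.updateSpaceConsumed("/user/alice/data", 0,
        -blockSize * replication);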

+ 24 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.Util.now;
+
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.EOFException;
@@ -32,10 +34,30 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
 
 public class FSEditLogLoader {
   private final FSNamesystem fsNamesys;

+ 2 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.io.BytesWritable;

+ 1 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;

+ 3 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -22,7 +22,6 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -33,6 +32,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;

+ 34 - 68
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -97,15 +97,20 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
-import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@@ -235,7 +240,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   // Stores the correct file name hierarchy
   //
   public FSDirectory dir;
-  BlockManager blockManager;
+  public BlockManager blockManager;
   
   // Block pool ID used by this namenode
   String blockPoolId;
@@ -270,10 +275,10 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    * Stores a set of DatanodeDescriptor objects.
    * This is a subset of {@link #datanodeMap}, containing nodes that are 
    * considered alive.
-   * The {@link HeartbeatMonitor} periodically checks for outdated entries,
+   * The HeartbeatMonitor periodically checks for outdated entries,
    * and removes them from the list.
    */
-  ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>();
+  public ArrayList<DatanodeDescriptor> heartbeats = new ArrayList<DatanodeDescriptor>();
 
   public LeaseManager leaseManager = new LeaseManager(this); 
 
@@ -314,8 +319,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   private volatile SafeModeInfo safeMode;  // safe mode information
   private Host2NodesMap host2DataNodeMap = new Host2NodesMap();
     
-  // datanode networktoplogy
-  NetworkTopology clusterMap = new NetworkTopology();
+  /** datanode network topology */
+  public NetworkTopology clusterMap = new NetworkTopology();
   private DNSToSwitchMapping dnsToSwitchMapping;
 
   private HostsFileReader hostsReader; 
@@ -329,7 +334,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   private final GenerationStamp generationStamp = new GenerationStamp();
 
   // Ask Datanode only up to this many blocks to delete.
-  int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
+  public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
 
   // precision of access times.
   private long accessTimePrecision = 0;
@@ -472,23 +477,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   }
 
   // utility methods to acquire and release read lock and write lock
-  void readLock() {
+  public void readLock() {
     this.fsLock.readLock().lock();
   }
 
-  void readUnlock() {
+  public void readUnlock() {
     this.fsLock.readLock().unlock();
   }
 
-  void writeLock() {
+  public void writeLock() {
     this.fsLock.writeLock().lock();
   }
 
-  void writeUnlock() {
+  public void writeUnlock() {
     this.fsLock.writeLock().unlock();
   }
 
-  boolean hasWriteLock() {
+  public boolean hasWriteLock() {
     return this.fsLock.isWriteLockedByCurrentThread();
   }
 
@@ -1014,7 +1019,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   }
 
   /** Create a LocatedBlock. */
-  LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
+  public LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
       final long offset, final boolean corrupt) throws IOException {
     return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt);
   }
@@ -3013,7 +3018,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    * @return an array of datanode commands 
    * @throws IOException
    */
-  DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
+  public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xceiverCount, int xmitsInProgress, int failedVolumes) 
         throws IOException {
@@ -3521,7 +3526,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    * If no such a node is available,
    * then pick a node with least free space
    */
-  void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, 
+  public void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, 
                               Block b, short replication,
                               DatanodeDescriptor addedNode,
                               DatanodeDescriptor delNodeHint,
@@ -3979,45 +3984,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     return replication;
   }
     
-  /**
-   * A immutable object that stores the number of live replicas and
-   * the number of decommissined Replicas.
-   */
-  static class NumberReplicas {
-    private int liveReplicas;
-    int decommissionedReplicas;
-    private int corruptReplicas;
-    private int excessReplicas;
-
-    NumberReplicas() {
-      initialize(0, 0, 0, 0);
-    }
-
-    NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
-      initialize(live, decommissioned, corrupt, excess);
-    }
-
-    void initialize(int live, int decommissioned, int corrupt, int excess) {
-      liveReplicas = live;
-      decommissionedReplicas = decommissioned;
-      corruptReplicas = corrupt;
-      excessReplicas = excess;
-    }
-
-    int liveReplicas() {
-      return liveReplicas;
-    }
-    int decommissionedReplicas() {
-      return decommissionedReplicas;
-    }
-    int corruptReplicas() {
-      return corruptReplicas;
-    }
-    int excessReplicas() {
-      return excessReplicas;
-    }
-  } 
-
   /**
    * Change, if appropriate, the admin state of a datanode to 
    * decommission completed. Return true if decommission is complete.
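The NumberReplicas inner class deleted here becomes a top-level class in
o.a.h.h.s.blockmanagement. A sketch of the relocated class, reconstructed
from the lines removed above (the public modifiers on the class and the
accessors are assumptions implied by the cross-package move; the spelling
fixes in the comment are editorial):

    package org.apache.hadoop.hdfs.server.blockmanagement;

    /**
     * An immutable object that stores the number of live replicas and
     * the number of decommissioned replicas.
     */
    public class NumberReplicas {
      private int liveReplicas;
      private int decommissionedReplicas;
      private int corruptReplicas;
      private int excessReplicas;

      NumberReplicas() {
        initialize(0, 0, 0, 0);
      }

      NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
        initialize(live, decommissioned, corrupt, excess);
      }

      void initialize(int live, int decommissioned, int corrupt, int excess) {
        liveReplicas = live;
        decommissionedReplicas = decommissioned;
        corruptReplicas = corrupt;
        excessReplicas = excess;
      }

      public int liveReplicas()           { return liveReplicas; }
      public int decommissionedReplicas() { return decommissionedReplicas; }
      public int corruptReplicas()        { return corruptReplicas; }
      public int excessReplicas()         { return excessReplicas; }
    }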
@@ -4675,7 +4641,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    * Check whether the name node is in safe mode.
    * @return true if safe mode is ON, false otherwise
    */
-  boolean isInSafeMode() {
+  public boolean isInSafeMode() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
@@ -4686,7 +4652,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   /**
    * Check whether the name node is in startup mode.
    */
-  boolean isInStartupSafeMode() {
+  public boolean isInStartupSafeMode() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
@@ -4697,7 +4663,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   /**
    * Check whether replication queues are populated.
    */
-  boolean isPopulatingReplQueues() {
+  public boolean isPopulatingReplQueues() {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
@@ -4709,7 +4675,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    * Increment number of blocks that reached minimal replication.
    * @param replication current replication 
    */
-  void incrementSafeBlockCount(int replication) {
+  public void incrementSafeBlockCount(int replication) {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null)
@@ -4720,7 +4686,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   /**
    * Decrement number of blocks that reached minimal replication.
    */
-  void decrementSafeBlockCount(Block b) {
+  public void decrementSafeBlockCount(Block b) {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
     if (safeMode == null) // mostly true
@@ -5042,13 +5008,13 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   @Override // FSNamesystemMBean
   @Metric
   public long getPendingReplicationBlocks() {
-    return blockManager.pendingReplicationBlocksCount;
+    return blockManager.getPendingReplicationBlocksCount();
   }
 
   @Override // FSNamesystemMBean
   @Metric
   public long getUnderReplicatedBlocks() {
-    return blockManager.underReplicatedBlocksCount;
+    return blockManager.getUnderReplicatedBlocksCount();
   }
 
   /** Return number of under-replicated but not missing blocks */
@@ -5059,23 +5025,23 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   /** Returns number of blocks with corrupt replicas */
   @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
   public long getCorruptReplicaBlocks() {
-    return blockManager.corruptReplicaBlocksCount;
+    return blockManager.getCorruptReplicaBlocksCount();
   }
 
   @Override // FSNamesystemMBean
   @Metric
   public long getScheduledReplicationBlocks() {
-    return blockManager.scheduledReplicationBlocksCount;
+    return blockManager.getScheduledReplicationBlocksCount();
   }
 
   @Metric
   public long getPendingDeletionBlocks() {
-    return blockManager.pendingDeletionBlocksCount;
+    return blockManager.getPendingDeletionBlocksCount();
   }
 
   @Metric
   public long getExcessBlocks() {
-    return blockManager.excessBlocksCount;
+    return blockManager.getExcessBlocksCount();
   }
   
   @Metric
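The metric getters above stop reading BlockManager's counter fields directly
and go through accessors instead, so the counters can be hidden inside the
new package. A sketch of the accessor shape, assuming the counters keep the
field names visible at the old call sites:

    // In BlockManager (sketch): each counter field becomes private and is
    // published through a read-only getter.
    private long pendingReplicationBlocksCount;

    public long getPendingReplicationBlocksCount() {
      return pendingReplicationBlocksCount;
    }

The other getters (getUnderReplicatedBlocksCount(), getExcessBlocksCount(),
and so on) presumably follow the same one-line pattern.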
@@ -5444,7 +5410,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   }
 
   /** Get a datanode descriptor given corresponding storageID */
-  DatanodeDescriptor getDatanode(String nodeID) {
+  public DatanodeDescriptor getDatanode(String nodeID) {
     assert hasReadOrWriteLock();
     return datanodeMap.get(nodeID);
   }
@@ -5508,7 +5474,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
       if (startBlockAfter != null) {
         startBlockId = Block.filename2id(startBlockAfter);
       }
-      BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
+      UnderReplicatedBlocks.BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
       while (blkIterator.hasNext()) {
         Block blk = blkIterator.next();
         INode inode = blockManager.getINode(blk);

+ 4 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Host2NodesMap.java

@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+
 class Host2NodesMap {
   private HashMap<String, DatanodeDescriptor[]> map
     = new HashMap<String, DatanodeDescriptor[]>();

+ 6 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -32,7 +34,7 @@ import org.apache.hadoop.util.StringUtils;
  * This is a base INode class containing common fields for file and 
  * directory inodes.
  */
-abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   /*
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.
@@ -324,7 +326,7 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   /**
    * Is this inode being constructed?
    */
-  boolean isUnderConstruction() {
+  public boolean isUnderConstruction() {
     return false;
   }
 

+ 9 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -24,8 +24,11 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 
-class INodeFile extends INode {
+/** I-node for closed file. */
+public class INodeFile extends INode {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
@@ -106,7 +109,7 @@ class INodeFile extends INode {
    * Get file blocks 
    * @return file blocks
    */
-  BlockInfo[] getBlocks() {
+  public BlockInfo[] getBlocks() {
     return this.blocks;
   }
 
@@ -149,7 +152,7 @@ class INodeFile extends INode {
   /**
    * Set file block
    */
-  void setBlock(int idx, BlockInfo blk) {
+  public void setBlock(int idx, BlockInfo blk) {
     this.blocks[idx] = blk;
   }
 
@@ -237,7 +240,7 @@ class INodeFile extends INode {
    * Get the last block of the file.
    * Make sure it has the right type.
    */
-  <T extends BlockInfo> T getLastBlock() throws IOException {
+  public <T extends BlockInfo> T getLastBlock() throws IOException {
     if (blocks == null || blocks.length == 0)
       return null;
     T returnBlock = null;
@@ -252,7 +255,8 @@ class INodeFile extends INode {
     return returnBlock;
   }
 
-  int numBlocks() {
+  /** @return the number of blocks */ 
+  public int numBlocks() {
     return blocks == null ? 0 : blocks.length;
   }
 }
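getLastBlock() is generic so the caller names the block type it expects; the
declared IOException presumably covers the case where the stored block is not
of that type. A usage sketch on an INodeFile reference:

    // The assignment target drives the type inference; a mismatch is
    // reported as an IOException instead of a ClassCastException.
    BlockInfoUnderConstruction last = file.getLastBlock();
    if (last != null) {
      // inspect expected locations, recovery state, etc.
    }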

+ 10 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -21,10 +21,15 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 
-
-class INodeFileUnderConstruction extends INodeFile {
+/**
+ * I-node for file being written.
+ */
+public class INodeFileUnderConstruction extends INodeFile {
   private  String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -43,7 +48,7 @@ class INodeFileUnderConstruction extends INodeFile {
     this.clientNode = clientNode;
   }
 
-  public INodeFileUnderConstruction(byte[] name,
+  INodeFileUnderConstruction(byte[] name,
                              short blockReplication,
                              long modificationTime,
                              long preferredBlockSize,
@@ -80,7 +85,7 @@ class INodeFileUnderConstruction extends INodeFile {
    * Is this inode being constructed?
    */
   @Override
-  boolean isUnderConstruction() {
+  public boolean isUnderConstruction() {
     return true;
   }
 
@@ -122,7 +127,7 @@ class INodeFileUnderConstruction extends INodeFile {
    * Convert the last block of the file to an under-construction block.
    * Set its locations.
    */
-  BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
                                           DatanodeDescriptor[] targets)
   throws IOException {
     if (blocks == null || blocks.length == 0) {
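setLastBlock replaces the file's last block with an under-construction copy
pinned to the given locations; making it public lets the block-management
code drive pipeline setup and recovery. A sketch, with targets standing in
for a hypothetical replica selection made elsewhere:

    // targets: datanodes chosen to host the block being written/recovered.
    BlockInfoUnderConstruction uc =
        pendingFile.setLastBlock(pendingFile.getLastBlock(), targets);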

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -163,7 +163,7 @@ public class LeaseManager {
   /**
    * Finds the pathname for the specified pendingFile
    */
-  synchronized String findPath(INodeFileUnderConstruction pendingFile)
+  public synchronized String findPath(INodeFileUnderConstruction pendingFile)
       throws IOException {
     Lease lease = getLease(pendingFile.getClientName());
     if (lease != null) {

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -247,7 +247,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   /** Return the {@link FSNamesystem} object.
    * @return {@link FSNamesystem} object.
    */
-  FSNamesystem getNamesystem() {
+  public FSNamesystem getNamesystem() {
     return namesystem;
   }
 

+ 3 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -36,20 +36,20 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**

+ 4 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -36,24 +36,24 @@ import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
-
-import org.znerd.xmlenc.*;
+import org.znerd.xmlenc.XMLOutputter;
 
 class NamenodeJspHelper {
   static String getSafeModeText(FSNamesystem fsn) {

+ 8 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java

@@ -17,15 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
-import org.apache.hadoop.io.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
 
 
 /****************************************************

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -61,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -25,7 +25,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 
 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the

+ 8 - 6
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java

@@ -15,17 +15,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
 import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 
 
 /**
@@ -79,7 +82,6 @@ public class TestCorruptReplicaInfo extends TestCase {
       
       DatanodeDescriptor dn1 = new DatanodeDescriptor();
       DatanodeDescriptor dn2 = new DatanodeDescriptor();
-      DatanodeDescriptor dn3 = new DatanodeDescriptor();
       
       crm.addToCorruptReplicasMap(getBlock(0), dn1);
       assertEquals("Number of corrupt blocks not returning correctly",

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 

+ 2 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import junit.framework.TestCase;
 import java.lang.System;

+ 9 - 9
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -15,8 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -24,18 +23,19 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
 
 public class TestReplicationPolicy extends TestCase {
   private static final int BLOCK_SIZE = 1024;

+ 4 - 3
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java

@@ -15,7 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -25,8 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 
 public class TestUnderReplicatedBlocks extends TestCase {
   public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {

+ 12 - 11
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java

@@ -17,30 +17,31 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
 import java.io.File;
 import java.util.ArrayList;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
 
 /**
  * Test reporting of DN volume failure counts and metrics.

+ 1 - 0
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java

@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
 

+ 2 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -19,10 +19,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.ipc.Server;
 
 /**
  * This is a utility class to expose NameNode functionality for unit tests.

+ 2 - 0
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java

@@ -34,6 +34,8 @@ import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

+ 3 - 2
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java

@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 
-import junit.framework.TestCase;
-
 /**
  * Test if FSNamesystem handles heartbeat right
  */

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

+ 7 - 9
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java

@@ -17,19 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Random;
 
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -40,8 +36,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * This class tests the decommissioning of nodes.

+ 3 - 2
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java

@@ -18,10 +18,11 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-
 import junit.framework.TestCase;
 
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+
 public class TestHost2NodesMap extends TestCase {
   static private Host2NodesMap map = new Host2NodesMap();
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {

+ 6 - 6
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java

@@ -21,17 +21,17 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.util.ArrayList;
 
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import junit.framework.TestCase;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 
 
 

+ 4 - 3
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java

@@ -21,6 +21,8 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.concurrent.TimeoutException;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -30,9 +32,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 
 /**
  * Test if live nodes count per node is correct 

+ 5 - 4
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java

@@ -20,20 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.io.IOException;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import junit.framework.TestCase;
 
 public class TestOverReplicatedBlocks extends TestCase {
   /** Test processOverReplicatedBlock can handle corrupt replicas fine.

+ 9 - 8
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

@@ -17,34 +17,35 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Random;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.log4j.Level;
 
-import org.apache.commons.logging.LogFactory;
-
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /**
  * Test for metrics published by the Namenode
  */

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/net/TestNetworkTopology.java

@@ -24,8 +24,8 @@ import java.util.Map;
 
 import junit.framework.TestCase;
 
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 
 public class TestNetworkTopology extends TestCase {
   private final static NetworkTopology cluster = new NetworkTopology();

+ 13 - 7
hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

@@ -19,6 +19,16 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -34,20 +44,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
-
-import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Test;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
-
-import java.io.File;
-import java.io.IOException;
 
 public class TestNNLeaseRecovery {
   private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);

+ 0 - 10
hdfs/src/webapps/hdfs/block_info_xml.jsp

@@ -59,19 +59,9 @@
 %>
 <%@ page
   contentType="application/xml"
-  import="java.io.IOException"
-  import="java.util.Iterator"
-  import="org.apache.hadoop.conf.Configuration"
-  import="org.apache.hadoop.hdfs.protocol.Block"
-  import="org.apache.hadoop.hdfs.server.namenode.INode"
-  import="org.apache.hadoop.hdfs.server.namenode.BlocksMap"
-  import="org.apache.hadoop.hdfs.server.namenode.BlockInfo"
-  import="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
   import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo"
   import="org.apache.hadoop.hdfs.server.common.JspHelper"
-  import="org.apache.hadoop.util.ServletUtil"
   import="org.znerd.xmlenc.*"
-  
 %>
 <%!
   //for java.io.Serializable