Browse code

HDFS-381. Remove blocks from DataNode maps when corresponding file
is deleted. (Suresh Srinivas via rangadi)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@787417 13f79535-47bb-0310-9956-ffa450edef68

Raghu Angadi, 16 years ago
Parent
Current commit
bc9fac495f

+ 6 - 0
CHANGES.txt

@@ -2,6 +2,12 @@ Hadoop HDFS Change Log
 
 Trunk (unreleased changes)
 
+  IMPROVEMENTS
+
+    HDFS-381. Remove blocks from DataNode maps when corresponding file
+    is deleted. (Suresh Srinivas via rangadi)
+
+  BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than
     current consumption. (Boris Shkolnik via rangadi)

+ 1 - 1
src/java/hdfs-default.xml

@@ -284,7 +284,7 @@ creations/deletions), or "all".</description>
 
 <property>
   <name>dfs.blockreport.intervalMsec</name>
-  <value>3600000</value>
+  <value>21600000</value>
   <description>Determines block reporting interval in milliseconds.</description>
 </property>
 

+ 7 - 12
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -547,11 +547,10 @@ public class BlockManager {
     // initialize data structure for the return value
     List<List<Block>> blocksToReplicate = new ArrayList<List<Block>>(
         UnderReplicatedBlocks.LEVEL);
+    for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
+      blocksToReplicate.add(new ArrayList<Block>());
+    }
     synchronized (namesystem) {
-      for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
-        blocksToReplicate.add(new ArrayList<Block>());
-      }
-
       synchronized (neededReplications) {
         if (neededReplications.size() == 0) {
           missingBlocksInCurIter = 0;
@@ -1284,9 +1283,9 @@ public class BlockManager {
   }
 
   void removeBlock(Block block) {
-    blocksMap.removeINode(block);
-    corruptReplicas.removeFromCorruptReplicasMap(block);
     addToInvalidates(block);
+    corruptReplicas.removeFromCorruptReplicasMap(block);
+    blocksMap.removeBlock(block);
   }
 
   BlockInfo getStoredBlock(Block block) {
@@ -1400,10 +1399,6 @@ public class BlockManager {
     return blocksMap.addINode(block, iNode);
   }
 
-  void removeINode(Block block) {
-    blocksMap.removeINode(block);
-  }
-
   INodeFile getINode(Block b) {
     return blocksMap.getINode(b);
   }
@@ -1416,8 +1411,8 @@ public class BlockManager {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  void removeBlockFromMap(BlockInfo blockInfo) {
-    blocksMap.removeBlock(blockInfo);
+  void removeBlockFromMap(Block block) {
+    blocksMap.removeBlock(block);
   }
   
   public int getCapacity() {

+ 4 - 18
src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java

@@ -346,37 +346,23 @@ class BlocksMap {
     return info;
   }
 
-  /**
-   * Remove INode reference from block b.
-   * If it does not belong to any file and data-nodes,
-   * then remove the block from the block map.
-   */
-  void removeINode(Block b) {
-    BlockInfo info = map.get(b);
-    if (info != null) {
-      info.inode = null;
-      if (info.getDatanode(0) == null) {  // no datanodes left
-        map.remove(b);  // remove block from the map
-      }
-    }
-  }
-
   /**
    * Remove the block from the block map;
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.
    */
-  void removeBlock(BlockInfo blockInfo) {
+  void removeBlock(Block block) {
+    BlockInfo blockInfo = map.remove(block);
     if (blockInfo == null)
       return;
+
     blockInfo.inode = null;
     for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
       DatanodeDescriptor dn = blockInfo.getDatanode(idx);
       dn.removeBlock(blockInfo); // remove from the list and wipe the location
     }
-    map.remove(blockInfo);  // remove block from the map
   }
-
+  
   /** Returns the block object it it exists in the map. */
   BlockInfo getStoredBlock(Block b) {
     return map.get(b);

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -328,7 +328,7 @@ class FSDirectory implements Closeable {
     synchronized (rootDir) {
       // modify file-> block and blocksMap
       fileNode.removeBlock(block);
-      getBlockManager().removeINode(block);
+      getBlockManager().removeBlockFromMap(block);
       // If block is removed from blocksMap remove it from corruptReplicasMap
      getBlockManager().removeFromCorruptReplicasMap(block);
       getBlockManager().removeFromCorruptReplicasMap(block);