
HDFS-381. Remove blocks from DataNode maps when corresponding file
is deleted. (Suresh Srinivas via rangadi)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@787417 13f79535-47bb-0310-9956-ffa450edef68

Raghu Angadi 16 years ago
parent
Commit
bc9fac495f

+ 6 - 0
CHANGES.txt

@@ -2,6 +2,12 @@ Hadoop HDFS Change Log
 
 Trunk (unreleased changes)
 
+  IMPROVEMENTS
+
+    HDFS-381. Remove blocks from DataNode maps when corresponding file
+    is deleted. (Suresh Srinivas via rangadi)
+
+  BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than
     current consumption. (Boris Shkolnik via rangadi)

+ 1 - 1
src/java/hdfs-default.xml

@@ -284,7 +284,7 @@ creations/deletions), or "all".</description>
 
 <property>
   <name>dfs.blockreport.intervalMsec</name>
-  <value>3600000</value>
+  <value>21600000</value>
   <description>Determines block reporting interval in milliseconds.</description>
 </property>
 

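Note: this bumps the default dfs.blockreport.intervalMsec from 3600000 (1 hour) to 21600000 (6 hours). Below is a minimal standalone sketch, not part of the commit, showing how the effective value could be checked from client code; the class name BlockReportIntervalCheck is hypothetical, and the new default is passed explicitly in case hdfs-default.xml is not on the classpath.

import org.apache.hadoop.conf.Configuration;

public class BlockReportIntervalCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 21600000 ms = 6 hours (the new default), used as the fallback
    // when the property is not set in any loaded *-site.xml.
    long intervalMs = conf.getLong("dfs.blockreport.intervalMsec", 21600000L);
    System.out.println("block report interval = " + (intervalMs / 3600000.0) + " h");
  }
}

Deployments that depend on the old hourly reports can pin the previous value in hdfs-site.xml.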
+ 7 - 12
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -547,11 +547,10 @@ public class BlockManager {
     // initialize data structure for the return value
     List<List<Block>> blocksToReplicate = new ArrayList<List<Block>>(
         UnderReplicatedBlocks.LEVEL);
+    for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
+      blocksToReplicate.add(new ArrayList<Block>());
+    }
     synchronized (namesystem) {
-      for (int i = 0; i < UnderReplicatedBlocks.LEVEL; i++) {
-        blocksToReplicate.add(new ArrayList<Block>());
-      }
-
       synchronized (neededReplications) {
         if (neededReplications.size() == 0) {
           missingBlocksInCurIter = 0;
@@ -1284,9 +1283,9 @@ public class BlockManager {
   }
 
   void removeBlock(Block block) {
-    blocksMap.removeINode(block);
-    corruptReplicas.removeFromCorruptReplicasMap(block);
     addToInvalidates(block);
+    corruptReplicas.removeFromCorruptReplicasMap(block);
+    blocksMap.removeBlock(block);
   }
 
   BlockInfo getStoredBlock(Block block) {
@@ -1400,10 +1399,6 @@ public class BlockManager {
     return blocksMap.addINode(block, iNode);
   }
 
-  void removeINode(Block block) {
-    blocksMap.removeINode(block);
-  }
-
   INodeFile getINode(Block b) {
     return blocksMap.getINode(b);
   }
@@ -1416,8 +1411,8 @@ public class BlockManager {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  void removeBlockFromMap(BlockInfo blockInfo) {
-    blocksMap.removeBlock(blockInfo);
+  void removeBlockFromMap(Block block) {
+    blocksMap.removeBlock(block);
   }
   
   public int getCapacity() {

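The reordering in removeBlock matters: addToInvalidates walks the replica locations still recorded in blocksMap, so once blocksMap.removeBlock(block) has run there are no datanodes left to schedule the deletion on. A toy model of that dependency (hypothetical classes, not the real NameNode code):

import java.util.*;

public class RemoveBlockOrdering {
  // block -> datanodes holding a replica (stand-in for blocksMap)
  static Map<String, List<String>> blockLocations = new HashMap<>();
  // datanode -> blocks scheduled for deletion (stand-in for recentInvalidateSets)
  static Map<String, List<String>> invalidateSets = new HashMap<>();

  static void addToInvalidates(String block) {
    for (String dn : blockLocations.getOrDefault(block, Collections.emptyList())) {
      invalidateSets.computeIfAbsent(dn, k -> new ArrayList<>()).add(block);
    }
  }

  static void removeBlock(String block) {
    addToInvalidates(block);        // 1. schedule deletion on every replica first
    blockLocations.remove(block);   // 2. only then forget the block and its locations
  }

  public static void main(String[] args) {
    blockLocations.put("blk_1", Arrays.asList("dn1", "dn2"));
    removeBlock("blk_1");
    System.out.println(invalidateSets); // both dn1 and dn2 now have blk_1 queued
  }
}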
+ 4 - 18
src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java

@@ -346,37 +346,23 @@ class BlocksMap {
     return info;
   }
 
-  /**
-   * Remove INode reference from block b.
-   * If it does not belong to any file and data-nodes,
-   * then remove the block from the block map.
-   */
-  void removeINode(Block b) {
-    BlockInfo info = map.get(b);
-    if (info != null) {
-      info.inode = null;
-      if (info.getDatanode(0) == null) {  // no datanodes left
-        map.remove(b);  // remove block from the map
-      }
-    }
-  }
-
   /**
    * Remove the block from the block map;
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.
    */
-  void removeBlock(BlockInfo blockInfo) {
+  void removeBlock(Block block) {
+    BlockInfo blockInfo = map.remove(block);
     if (blockInfo == null)
       return;
+
     blockInfo.inode = null;
     for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
       DatanodeDescriptor dn = blockInfo.getDatanode(idx);
       dn.removeBlock(blockInfo); // remove from the list and wipe the location
     }
-    map.remove(blockInfo);  // remove block from the map
   }
-
+  
   /** Returns the block object if it exists in the map. */
   BlockInfo getStoredBlock(Block b) {
     return map.get(b);

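The new removeBlock(Block) drops the map entry first and then works from the BlockInfo that map.remove returns, so a single lookup both removes the mapping and yields the object whose file and datanode links still need clearing. A minimal sketch of that pattern with stand-in types (not the real BlockInfo or DatanodeDescriptor):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class TinyBlocksMap {
  static class Info {
    Object inode;                                 // back-reference to the owning file
    List<String> datanodes = new ArrayList<>();   // replica locations
  }

  private final Map<Long, Info> map = new HashMap<>();

  void put(long blockId, Info info) { map.put(blockId, info); }

  void removeBlock(long blockId) {
    Info info = map.remove(blockId);  // remove and fetch in one step
    if (info == null)
      return;
    info.inode = null;                // detach from the file
    info.datanodes.clear();           // wipe all datanode locations
  }

  public static void main(String[] args) {
    TinyBlocksMap m = new TinyBlocksMap();
    m.put(1L, new Info());
    m.removeBlock(1L);                // no-op if the block is already gone
  }
}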
+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -328,7 +328,7 @@ class FSDirectory implements Closeable {
     synchronized (rootDir) {
       // modify file-> block and blocksMap
       fileNode.removeBlock(block);
-      getBlockManager().removeINode(block);
+      getBlockManager().removeBlockFromMap(block);
       // If block is removed from blocksMap remove it from corruptReplicasMap
       getBlockManager().removeFromCorruptReplicasMap(block);