
HADOOP-1117. Fix DFS scalability: when the namenode is restarted it consumes 80% CPU. Contributed by Dhruba Borthakur.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@518327 13f79535-47bb-0310-9956-ffa450edef68
Thomas White, 18 years ago
Commit 3585482ad5
2 files changed, 19 insertions(+), 9 deletions(-)
  1. CHANGES.txt (+4, -0)
  2. src/java/org/apache/hadoop/dfs/FSNamesystem.java (+15, -9)

+ 4 - 0
CHANGES.txt

@@ -58,6 +58,10 @@ Trunk (unreleased changes)
 17. HADOOP-1109.  Fix NullPointerException in StreamInputFormat.
     (Koji Noguchi via tomwhite)
 
+18. HADOOP-1117.  Fix DFS scalability: when the namenode is
+    restarted it consumes 80% CPU. (Dhruba Borthakur via
+    tomwhite)
+
 
 Release 0.12.0 - 2007-03-02
 

+ 15 - 9
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -2121,10 +2121,12 @@ class FSNamesystem implements FSConstants {
         // check whether safe replication is reached for the block
         // only if it is a part of a files
         incrementSafeBlockCount( numCurrentReplica );
-        
+ 
         // handle underReplication/overReplication
         short fileReplication = fileINode.getReplication();
-        if(numCurrentReplica < fileReplication) {
+        if (numCurrentReplica >= fileReplication) {
+          neededReplications.remove(block);
+        } else {
           neededReplications.update(block, curReplicaDelta, 0);
         }
         proccessOverReplicatedBlock( block, fileReplication );
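
The hunk above changes the block-report path so that a block whose replication target is already met is removed from the needed-replications queue (neededReplications.remove(block) when numCurrentReplica >= fileReplication) rather than being left for the replication monitor to revisit on every pass. Below is a minimal, self-contained sketch of that decision; the class, field, and method names are illustrative stand-ins, not the actual FSNamesystem API.

import java.util.HashSet;
import java.util.Set;

// Simplified analog of the decision added in the hunk above: when a replica
// of a block is reported, a block that already meets its replication target
// is dropped from the needed-replications queue instead of staying queued.
public class NeededReplicationsSketch {

    // Stand-in for FSNamesystem's neededReplications structure.
    private final Set<String> neededReplications = new HashSet<>();

    /**
     * Called when a replica of {@code blockId} is reported.
     *
     * @param blockId           identifier of the block (hypothetical type)
     * @param numCurrentReplica replicas known after this report
     * @param fileReplication   replication factor of the owning file
     */
    public void blockReportReceived(String blockId,
                                    int numCurrentReplica,
                                    short fileReplication) {
        if (numCurrentReplica >= fileReplication) {
            // Target met: stop tracking the block as under-replicated.
            neededReplications.remove(blockId);
        } else {
            // Still short of the target: keep (or re-add) it in the queue.
            neededReplications.add(blockId);
        }
    }

    public boolean isUnderReplicated(String blockId) {
        return neededReplications.contains(blockId);
    }

    public static void main(String[] args) {
        NeededReplicationsSketch ns = new NeededReplicationsSketch();
        ns.blockReportReceived("blk_1", 2, (short) 3); // still under-replicated
        ns.blockReportReceived("blk_1", 3, (short) 3); // target met, removed
        System.out.println(ns.isUnderReplicated("blk_1")); // prints false
    }
}
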
@@ -2640,17 +2642,21 @@ class FSNamesystem implements FSConstants {
                   filterDecommissionedNodes(containingNodes);
               int numCurrentReplica = nodes.size() +
                                       pendingReplications.getNumReplicas(block);
-              DatanodeDescriptor targets[] = replicator.chooseTarget(
+              if (numCurrentReplica >= fileINode.getReplication()) {
+                it.remove();
+              } else {
+                DatanodeDescriptor targets[] = replicator.chooseTarget(
                   Math.min( fileINode.getReplication() - numCurrentReplica,
                             needed),
                   datanodeMap.get(srcNode.getStorageID()),
                   nodes, null, blockSize);
-              if (targets.length > 0) {
-                // Build items to return
-                replicateBlocks.add(block);
-                numCurrentReplicas.add(new Integer(numCurrentReplica));
-                replicateTargetSets.add(targets);
-                needed -= targets.length;
+                if (targets.length > 0) {
+                  // Build items to return
+                  replicateBlocks.add(block);
+                  numCurrentReplicas.add(new Integer(numCurrentReplica));
+                  replicateTargetSets.add(targets);
+                  needed -= targets.length;
+                }
               }
             }
           }
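
The second hunk applies the same test while replication work is being scheduled: a block whose current replica count already meets the file's replication factor is removed via the iterator instead of being passed to replicator.chooseTarget. The following sketch illustrates that pattern; the Block record and the replica counts it carries are hypothetical stand-ins for the real FSNamesystem bookkeeping.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Minimal sketch of the pattern in the second hunk: while scanning the list
// of blocks that need replication, fully replicated blocks are pruned with
// Iterator.remove() rather than having replication targets chosen for them.
public class PendingTransfersSketch {

    static final class Block {
        final String id;
        final int currentReplicas;
        final short targetReplication;

        Block(String id, int currentReplicas, short targetReplication) {
            this.id = id;
            this.currentReplicas = currentReplicas;
            this.targetReplication = targetReplication;
        }
    }

    /** Returns the blocks that actually still need replication work. */
    static List<Block> chooseReplicationWork(List<Block> neededReplications) {
        List<Block> work = new ArrayList<>();
        for (Iterator<Block> it = neededReplications.iterator(); it.hasNext();) {
            Block block = it.next();
            if (block.currentReplicas >= block.targetReplication) {
                // Already fully replicated: prune it so later scans skip it.
                it.remove();
            } else {
                // Still under-replicated: schedule it (target choice omitted).
                work.add(block);
            }
        }
        return work;
    }

    public static void main(String[] args) {
        List<Block> needed = new ArrayList<>();
        needed.add(new Block("blk_1", 3, (short) 3)); // already satisfied
        needed.add(new Block("blk_2", 1, (short) 3)); // needs work
        List<Block> work = chooseReplicationWork(needed);
        System.out.println(work.size());   // 1
        System.out.println(needed.size()); // 1 (blk_1 pruned)
    }
}

Pruning at both points avoids repeatedly reconsidering blocks that need no work, which is the overhead the commit message attributes to namenode restarts.
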