View source code

HADOOP-386. When removing excess DFS block replicas, remove those on nodes with the least free space first.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@425720 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 years ago
parent
commit
6f3e3ab516
2 changed files with 20 additions and 5 deletions
  1. 3 0
      CHANGES.txt
  2. 17 5
      src/java/org/apache/hadoop/dfs/FSNamesystem.java

+ 3 - 0
CHANGES.txt

@@ -97,6 +97,9 @@ Trunk (unreleased changes)
     that multiple datanode's can be run on a single host.
     (Devaraj Das via cutting)
 
+28. HADOOP-386.  When removing excess DFS block replicas, remove those
+    on nodes with the least space first.  (Johan Oskarson via cutting)
+
 
 Release 0.4.0 - 2006-06-28
 

+ 17 - 5
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -1391,14 +1391,26 @@ class FSNamesystem implements FSConstants {
      *
      * srcNodes.size() - dstNodes.size() == replication
      *
-     * For now, we choose nodes randomly.  In the future, we might enforce some
-     * kind of policy (like making sure replicates are spread across racks).
+     * We pick node with least free space
+     * In the future, we might enforce some kind of policy 
+     * (like making sure replicates are spread across racks).
      */
     void chooseExcessReplicates(Vector nonExcess, Block b, short replication) {
         while (nonExcess.size() - replication > 0) {
-            int chosenNode = r.nextInt(nonExcess.size());
-            DatanodeDescriptor cur = (DatanodeDescriptor) nonExcess.elementAt(chosenNode);
-            nonExcess.removeElementAt(chosenNode);
+            DatanodeInfo cur = null;
+            long minSpace = Long.MAX_VALUE;
+            
+            for (Iterator iter = nonExcess.iterator(); iter.hasNext();) {
+                DatanodeInfo node = (DatanodeInfo) iter.next();
+                long free = node.getRemaining();
+                
+                if(minSpace > free) {
+                    minSpace = free;
+                    cur = node;
+                }
+            }
+            
+            nonExcess.remove(cur);
 
             TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(cur.getStorageID());
             if (excessBlocks == null) {