
The nodes listed in include and exclude files
are always listed in the datanode report.
(Raghu Angadi via dhruba)



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@580461 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 18 years ago
parent
commit
ca86a2a73e
3 changed files with 74 additions and 28 deletions
  1. + 4 - 0    CHANGES.txt
  2. + 67 - 27  src/java/org/apache/hadoop/dfs/FSNamesystem.java
  3. + 3 - 1    src/java/org/apache/hadoop/util/HostsFileReader.java
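
The point of the change, in brief: the datanode report is now the union of every registered datanode and every host named in the include/exclude files, so a host that appears in those files but never registered shows up as a dead entry instead of being silently omitted. A minimal self-contained sketch of that merge rule (made-up host names, not the actual Hadoop classes):

    import java.util.*;

    // Illustrative only: mirrors the HADOOP-1933 reporting rule, in which
    // include/exclude hosts that never registered are reported as dead.
    public class ReportSketch {
      public static void main(String[] args) {
        Set<String> registered = new TreeSet<String>(Arrays.asList("dn1", "dn2"));
        // Union of the include and exclude files (the "must list"):
        Set<String> mustList = new TreeSet<String>(Arrays.asList("dn2", "dn3"));

        List<String> report = new ArrayList<String>();
        for (String dn : registered) {
          report.add(dn + " (registered)");
          mustList.remove(dn);  // already covered by the live/dead scan
        }
        for (String host : mustList) {
          report.add(host + " (dead, never registered)");  // synthesized entry
        }
        // dn3 is listed even though it never sent a heartbeat:
        System.out.println(report);
      }
    }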

+ 4 - 0
CHANGES.txt

@@ -94,6 +94,10 @@ Trunk (unreleased changes)
 
   BUG FIXES
 
+    HADOOP-1933. The nodes listed in include and exclude files 
+    are always listed in the datanode report.
+    (Raghu Angadi via dhruba)
+
     HADOOP-1953. The job tracker should wait between calls to try and delete 
     the system directory (Owen O'Malley via devaraj)
 

+ 67 - 27
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -2474,43 +2474,83 @@ class FSNamesystem implements FSConstants {
     }
   }
 
-  public synchronized DatanodeInfo[] datanodeReport( DatanodeReportType type ) {
-    ArrayList<DatanodeInfo> results = new ArrayList<DatanodeInfo>();
+  private synchronized ArrayList<DatanodeDescriptor> getDatanodeListForReport(
+                                                      DatanodeReportType type) {                  
+    
+    boolean listLiveNodes = type == DatanodeReportType.ALL ||
+                            type == DatanodeReportType.LIVE;
+    boolean listDeadNodes = type == DatanodeReportType.ALL ||
+                            type == DatanodeReportType.DEAD;
+
+    HashMap<String, String> mustList = new HashMap<String, String>();
+    
+    if (listDeadNodes) {
+      // First, load all the nodes listed in the include and exclude files.
+      for (Iterator<String> it = hostsReader.getHosts().iterator(); 
+           it.hasNext();) {
+        mustList.put(it.next(), "");
+      }
+      for (Iterator<String> it = hostsReader.getExcludedHosts().iterator(); 
+           it.hasNext();) {
+        mustList.put(it.next(), "");
+      }
+    }
+   
+    ArrayList<DatanodeDescriptor> nodes = null;
+    
     synchronized (datanodeMap) {
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
-        DatanodeDescriptor tmp = it.next();
-        switch (type) {
-        case ALL: 
-          results.add(new DatanodeInfo(tmp));
-          break;
-        case DEAD: 
-          if(isDatanodeDead(tmp)) {
-            results.add(new DatanodeInfo(tmp));
-          }
-          break;
-        case LIVE:
-          if(!isDatanodeDead(tmp)) {
-            results.add(new DatanodeInfo(tmp));
-          }
-          break;
+      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size() + 
+                                                mustList.size());
+      
+      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); 
+                                                               it.hasNext();) {
+        DatanodeDescriptor dn = it.next();
+        boolean isDead = isDatanodeDead(dn);
+        if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) {
+          nodes.add(dn);
         }
+        // Remove any form of this datanode from the include/exclude lists.
+        mustList.remove(dn.getName());
+        mustList.remove(dn.getHost());
+        mustList.remove(dn.getHostName());
+      }
+    }
+    
+    if (listDeadNodes) {
+      for (Iterator<String> it = mustList.keySet().iterator(); it.hasNext();) {
+        DatanodeDescriptor dn = 
+            new DatanodeDescriptor(new DatanodeID(it.next(), "", 0));
+        dn.setLastUpdate(0);
+        nodes.add(dn);
       }
     }
-    return results.toArray(new DatanodeInfo[results.size()]);
+    
+    return nodes;
+  }
+
+  public synchronized DatanodeInfo[] datanodeReport( DatanodeReportType type ) {
+
+    ArrayList<DatanodeDescriptor> results = getDatanodeListForReport(type);
+    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
+    for (int i=0; i<arr.length; i++) {
+      arr[i] = new DatanodeInfo(results.get(i));
+    }
+    return arr;
   }
     
   /**
    */
   public synchronized void DFSNodesStatus(ArrayList<DatanodeDescriptor> live, 
                                           ArrayList<DatanodeDescriptor> dead) {
-    synchronized (datanodeMap) {
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
-        DatanodeDescriptor node = it.next();
-        if (isDatanodeDead(node))
-          dead.add(node);
-        else
-          live.add(node);
-      }
+
+    ArrayList<DatanodeDescriptor> results = 
+                            getDatanodeListForReport(DatanodeReportType.ALL);    
+    for(Iterator<DatanodeDescriptor> it = results.iterator(); it.hasNext();) {
+      DatanodeDescriptor node = it.next();
+      if (isDatanodeDead(node))
+        dead.add(node);
+      else
+        live.add(node);
     }
   }
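
Worth noting why the synthesized entries call setLastUpdate(0): liveness is decided by comparing a node's last heartbeat time against an expiry window, so a last-update of 0 (the epoch) always classifies the node as dead, both in datanodeReport and in the rewritten DFSNodesStatus. A hedged, self-contained reconstruction of the isDatanodeDead check the diff relies on (the constant is assumed; the real expiry window is derived from the configured heartbeat interval):

    // Illustrative reconstruction, not the verbatim FSNamesystem code.
    public class LivenessSketch {
      // Assumed 10-minute expiry window for heartbeats.
      static final long HEARTBEAT_EXPIRE_INTERVAL = 10 * 60 * 1000L;

      static boolean isDatanodeDead(long lastUpdateMillis) {
        return lastUpdateMillis <
               System.currentTimeMillis() - HEARTBEAT_EXPIRE_INTERVAL;
      }

      public static void main(String[] args) {
        System.out.println(isDatanodeDead(0L));  // true: a synthesized node is dead
        System.out.println(isDatanodeDead(System.currentTimeMillis()));  // false: fresh heartbeat
      }
    }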
 

+ 3 - 1
src/java/org/apache/hadoop/util/HostsFileReader.java

@@ -49,7 +49,9 @@ public class HostsFileReader {
         String[] nodes = line.split("[ \t\n\f\r]+");
         if (nodes != null) {
           for (int i = 0; i < nodes.length; i++) {
-            set.add(nodes[i]);  // might need to add canonical name
+            if (!nodes[i].equals("")) {
+              set.add(nodes[i]);  // might need to add canonical name
+            }
           }
         }
       }
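
The extra check matters because String.split keeps leading empty tokens: a hosts-file line that begins with whitespace splits into an empty first element, which would previously have been added to the set as a bogus empty hostname. A quick demonstration (made-up host names):

    // Shows why the empty-string guard is needed when parsing hosts files.
    public class SplitSketch {
      public static void main(String[] args) {
        String line = "  host1.example.com host2.example.com";  // leading whitespace
        String[] nodes = line.split("[ \t\n\f\r]+");
        for (int i = 0; i < nodes.length; i++) {
          System.out.println("[" + nodes[i] + "]");
        }
        // Prints [], [host1.example.com], [host2.example.com]:
        // the empty first token is what the added if-statement filters out.
      }
    }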