Browse files

HADOOP-3084. Fix HftpFileSystem to work for zero-length files.
(cdouglas)



git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.16@642011 13f79535-47bb-0310-9956-ffa450edef68

Christopher Douglas 17 years ago
parent
commit
9f4df91e92
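
Why the fix is needed: a zero-length file has no located blocks, so the old FileDataServlet.pickSrcDatanode (removed below) built an empty candidate list and then called loc.get(r.nextInt(loc.size())) with loc.size() == 0. Random.nextInt(0) throws IllegalArgumentException, so HFTP access to empty files failed in the namenode's servlet. A minimal sketch of that failure mode, using a plain list in place of the real DatanodeInfo candidates:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;

    public class ZeroLengthRepro {
      public static void main(String[] args) {
        // For a zero-length file the namenode returns no block locations,
        // so the candidate list the old code built stayed empty.
        List<String> loc = new ArrayList<String>();
        Random r = new Random();
        // Random.nextInt(0) throws IllegalArgumentException before
        // loc.get(...) is even reached: the failure this patch removes.
        System.out.println(loc.get(r.nextInt(loc.size())));
      }
    }

The patch sidesteps the sampling logic entirely and asks the namenode for a random live datanode whenever there are no blocks to consult.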

+ 3 - 0
CHANGES.txt

@@ -46,6 +46,9 @@ Release 0.16.2 - Unreleased
     HADOOP-3070. Protect the trash emptier thread from null pointer
     exceptions. (Koji Noguchi via omalley)
 
+    HADOOP-3084. Fix HftpFileSystem to work for zero-length files.
+    (cdouglas)
+
 Release 0.16.1 - 2008-03-13
 
   INCOMPATIBLE CHANGES

+ 6 - 0
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -3449,6 +3449,7 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
   }
     
   /** Stop at and return the datanode at index (used for content browsing)*/
+  @Deprecated
   private DatanodeDescriptor getDatanodeByIndex(int index) {
     int i = 0;
     for (DatanodeDescriptor node : datanodeMap.values()) {
@@ -3460,6 +3461,7 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
     return null;
   }
     
+  @Deprecated
   public String randomDataNode() {
     int size = datanodeMap.size();
     int index = 0;
@@ -3476,6 +3478,10 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
     }
     return null;
   }
+
+  public DatanodeDescriptor getRandomDatanode() {
+    return replicator.chooseTarget(1, null, null, 0)[0];
+  }
     
   public int getNameNodeInfoPort() {
     return infoPort;
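
The new getRandomDatanode() reuses the namesystem's replication target chooser (the replicator field): chooseTarget(1, null, null, 0) asks the placement policy for one target for a zero-size block with no writer affinity, so the returned node passes the policy's usual liveness checks. The [0] index assumes at least one target comes back; a defensive variant (an assumption, not part of the commit) might look like:

    // Sketch only: guards against an empty or fully excluded cluster,
    // where chooseTarget could return no targets and the patch's [0]
    // index would throw ArrayIndexOutOfBoundsException.
    public DatanodeDescriptor getRandomDatanodeOrNull() {
      DatanodeDescriptor[] targets = replicator.chooseTarget(1, null, null, 0);
      return (targets != null && targets.length > 0) ? targets[0] : null;
    }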

+ 15 - 31
src/java/org/apache/hadoop/dfs/FileDataServlet.java

@@ -36,48 +36,32 @@ import org.apache.hadoop.security.UnixUserGroupInformation;
 public class FileDataServlet extends DfsServlet {
   private static URI createUri(DFSFileInfo i, UnixUserGroupInformation ugi,
       ClientProtocol nnproxy) throws IOException, URISyntaxException {
-    final DatanodeInfo host = pickSrcDatanode(i, nnproxy);
-    return new URI("http", null, host.getHostName(), host.getInfoPort(),
+    final DatanodeID host = pickSrcDatanode(i, nnproxy);
+    final String hostname;
+    if (host instanceof DatanodeInfo) {
+      hostname = ((DatanodeInfo)host).getHostName();
+    } else {
+      hostname = host.getHost();
+    }
+    return new URI("http", null, hostname, host.getInfoPort(),
           "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
   }
 
-  private final static int BLOCK_SAMPLE = 5;
+  private final static JspHelper jspHelper = new JspHelper();
 
   /** Select a datanode to service this request.
    * Currently, this looks at no more than the first five blocks of a file,
    * selecting a datanode randomly from the most represented.
    */
-  private static DatanodeInfo pickSrcDatanode(DFSFileInfo i,
+  private static DatanodeID pickSrcDatanode(DFSFileInfo i,
       ClientProtocol nnproxy) throws IOException {
-    long sample;
-    if (i.getLen() == 0) sample = 1;
-    else sample = i.getLen() / i.getBlockSize() > BLOCK_SAMPLE
-        ? i.getBlockSize() * BLOCK_SAMPLE - 1
-        : i.getLen();
     final LocatedBlocks blks = nnproxy.getBlockLocations(
-        i.getPath().toUri().getPath(), 0, sample);
-    HashMap<DatanodeInfo, Integer> count = new HashMap<DatanodeInfo, Integer>();
-    for (LocatedBlock b : blks.getLocatedBlocks()) {
-      for (DatanodeInfo d : b.getLocations()) {
-        if (!count.containsKey(d)) {
-          count.put(d, 0);
-        }
-        count.put(d, count.get(d) + 1);
-      }
-    }
-    ArrayList<DatanodeInfo> loc = new ArrayList<DatanodeInfo>();
-    int max = 0;
-    for (Map.Entry<DatanodeInfo, Integer> e : count.entrySet()) {
-      if (e.getValue() > max) {
-        loc.clear();
-        max = e.getValue();
-      }
-      if (e.getValue() == max) {
-        loc.add(e.getKey());
-      }
+        i.getPath().toUri().getPath(), 0, 1);
+    if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
+      // pick a random datanode
+      return jspHelper.randomNode();
     }
-    final Random r = new Random();
-    return loc.get(r.nextInt(loc.size()));
+    return jspHelper.bestNode(blks.get(0));
   }
 
   /**
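
createUri redirects the client to the chosen datanode's /streamFile servlet. A standalone sketch of the URI it produces, using the same seven-argument URI constructor as the patch; the host, port (50075 was the usual datanode HTTP port in this era), and ugi string are example values:

    import java.net.URI;
    import java.net.URISyntaxException;

    public class StreamFileUriDemo {
      public static void main(String[] args) throws URISyntaxException {
        // Mirrors the URI construction in createUri above.
        URI u = new URI("http", null, "datanode1.example.com", 50075,
            "/streamFile", "filename=/user/foo/empty&ugi=foo,users", null);
        System.out.println(u);
        // prints: http://datanode1.example.com:50075/streamFile?filename=/user/foo/empty&ugi=foo,users
      }
    }

Note that pickSrcDatanode now returns the broader DatanodeID type: bestNode yields a DatanodeInfo with a resolved hostname, while randomNode yields a bare DatanodeID with only a host string, which is why createUri needs the instanceof check.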

+ 5 - 0
src/java/org/apache/hadoop/dfs/JspHelper.java

@@ -65,6 +65,11 @@ public class JspHelper {
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, webUGI);
   }
+
+  public DatanodeID randomNode() throws IOException {
+    return fsn.getRandomDatanode();
+  }
+
   public DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     DatanodeInfo chosenNode = null;
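
Taken together, the change means an HFTP read of an empty file now redirects to a random live datanode instead of failing inside the namenode's servlet. A hedged end-to-end sketch against the generic FileSystem API (the namenode address and path are examples only, and the exact factory method may differ slightly on the 0.16 branch):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HftpZeroLengthRead {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(
            URI.create("hftp://namenode.example.com:50070"), conf);
        // Opening and reading a zero-length file over HFTP: with this
        // fix the open succeeds and the first read returns EOF (-1).
        FSDataInputStream in = fs.open(new Path("/user/foo/empty-file"));
        System.out.println("read() = " + in.read());
        in.close();
      }
    }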