
HADOOP-842. Change HDFS protocol so that open() method is passed the client host name.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@496827 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
3d49e78524

+ 6 - 1
CHANGES.txt

@@ -6,6 +6,11 @@ Trunk (unreleased changes)
  1. HADOOP-781.  Remove methods deprecated in 0.10 that are no longer
     widely used.  (cutting)
 
+ 2. HADOOP-842.  Change HDFS protocol so that the open() method is
+    passed the client hostname, to permit the namenode to order block
+    locations on the basis of network topology.
+    (Hairong Kuang via cutting)
+
 
 Release 0.10.1 - 2007-01-10
 
@@ -30,7 +35,7 @@ Release 0.10.1 - 2007-01-10
     (Arun C Murthy via cutting)
 
  8. HADOOP-868.  Decrease the number of open files during map,
-    respecting io.sort.fa ctor.  (Devaraj Das via cutting)
+    respecting io.sort.factor.  (Devaraj Das via cutting)
 
  9. HADOOP-865.  Fix S3 FileSystem so that partially created files can
     be deleted.  (Tom White via cutting)

+ 6 - 3
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -29,19 +29,22 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 **********************************************************************/
 interface ClientProtocol extends VersionedProtocol {
 
-  public static final long versionID = 4L; // decommission node added
+  public static final long versionID = 5L; // open() takes a new parameter
   
     ///////////////////////////////////////
     // File contents
     ///////////////////////////////////////
     /**
      * Open an existing file, at the given name.  Returns block 
-     * and DataNode info.  The client will then have to contact
+     * and DataNode info.  DataNodes for each block are sorted by
+     * the distance to the clientMachine, which contains the host name.
+     * The client will then have to contact
      * each indicated DataNode to obtain the actual data.  There
      * is no need to call close() or any other function after
      * calling open().
      */
-    public LocatedBlock[] open(String src) throws IOException;
+    public LocatedBlock[] open( String clientMachine,
+                                String src) throws IOException;
 
     /**
      * Create a new file.  Get back block and datanode info,

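For orientation, a minimal caller-side sketch of the version-5 open() (illustrative only; the path, the variable names, and the hostname lookup are assumptions, not code from this commit):

    // The client now identifies itself by hostname, so the namenode can return
    // each block's DataNodes ordered by distance to this caller.
    String clientMachine = java.net.InetAddress.getLocalHost().getHostName(); // assumed lookup
    LocatedBlock[] blocks = namenode.open(clientMachine, "/user/example/data.txt");
    for (int i = 0; i < blocks.length; i++) {
        DatanodeInfo[] locations = blocks[i].getLocations();
        // read block i from locations[0], falling back to later entries on failure
    }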
+ 1 - 1
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -517,7 +517,7 @@ class DFSClient implements FSConstants {
         synchronized void openInfo() throws IOException {
             Block oldBlocks[] = this.blocks;
 
-            LocatedBlock results[] = namenode.open(src);            
+            LocatedBlock results[] = namenode.open(localName, src);            
             Vector blockV = new Vector();
             Vector nodeV = new Vector();
             for (int i = 0; i < results.length; i++) {

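The call above assumes DFSClient already carries a localName field holding the client's hostname. One plausible initialization, shown only as a hedged sketch (java.net imports assumed; not necessarily how DFSClient actually sets it):

    try {
        this.localName = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
        // with no resolvable hostname the namenode simply gets no locality hint
        this.localName = "";
    }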
+ 1 - 1
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -291,7 +291,7 @@ class FSNamesystem implements FSConstants {
      * The client should choose one of the machines from the machineArray
      * at random.
      */
-    public Object[] open(UTF8 src) {
+    public Object[] open(String clientMachine, UTF8 src) {
         Object results[] = null;
         Block blocks[] = dir.getFile(src);
         if (blocks != null) {

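FSNamesystem.open() now receives clientMachine so it can reorder each block's machine list before returning it. A rough sketch of that idea follows; the helper name and the getHost() accessor are assumptions for illustration, not the code in this patch:

    // Hypothetical helper: if one replica lives on the client's own host,
    // move it to the front of the location list and leave the rest in place.
    static void preferLocalReplica(String clientMachine, DatanodeInfo[] locations) {
        for (int i = 0; i < locations.length; i++) {
            if (locations[i].getHost().equals(clientMachine)) {   // assumed accessor
                DatanodeInfo local = locations[i];
                locations[i] = locations[0];
                locations[0] = local;
                break;
            }
        }
    }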
+ 2 - 2
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -204,8 +204,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     
     /**
      */
-    public LocatedBlock[] open(String src) throws IOException {
-        Object openResults[] = namesystem.open(new UTF8(src));
+    public LocatedBlock[] open(String clientMachine, String src) throws IOException {
+        Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
         if (openResults == null) {
             throw new IOException("Cannot open filename " + src);
         } else {

+ 3 - 1
src/java/org/apache/hadoop/dfs/NamenodeFsck.java

@@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSOutputStream;
 import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.net.DNS;
 
 
 /**
@@ -151,7 +152,8 @@ public class NamenodeFsck {
     }
     res.totalFiles++;
     res.totalSize += file.getLen();
-    LocatedBlock[] blocks = nn.open(file.getPath());
+    LocatedBlock[] blocks = nn.open(DNS.getDefaultHost("default"),
+                                    file.getPath());
     res.totalBlocks += blocks.length;
     if (showFiles) {
       out.print(file.getPath() + " " + file.getLen() + ", " + blocks.length + " block(s): ");

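Fsck (and the web UIs further down) run on a server rather than on a remote client, so they pass their own hostname obtained from DNS.getDefaultHost("default"). A standalone usage sketch, with a made-up class name:

    import org.apache.hadoop.net.DNS;

    public class ShowClientMachine {
        public static void main(String[] args) throws Exception {
            // Resolve the hostname of the "default" network interface, the same
            // value NamenodeFsck and the JSP pages pass as clientMachine to open().
            String host = DNS.getDefaultHost("default");
            System.out.println("clientMachine = " + host);
        }
    }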
+ 2 - 5
src/test/org/apache/hadoop/dfs/TestRestartDFS.java

@@ -38,6 +38,7 @@ public class TestRestartDFS extends TestCase {
   private static String TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"))
     .toString().replace(' ', '+');
+  private static Configuration conf = new Configuration();
 
   /** class MyFile contains enough information to recreate the contents of
    * a single file.
@@ -102,7 +103,6 @@ public class TestRestartDFS extends TestCase {
       files[idx] = new MyFile();
     }
     
-    Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getNamed(fsname, conf);
     Path root = new Path(topdir);
     
@@ -128,7 +128,7 @@ public class TestRestartDFS extends TestCase {
   private static boolean checkFiles(String fsname, String topdir, MyFile[] files) 
   throws IOException {
     
-    Configuration conf = new Configuration();
+    //Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getNamed(fsname, conf);
     Path root = new Path(topdir);
     
@@ -156,7 +156,6 @@ public class TestRestartDFS extends TestCase {
   /** delete directory and everything underneath it.*/
   private static void deldir(String fsname, String topdir)
   throws IOException {
-    Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getNamed(fsname, conf);
     Path root = new Path(topdir);
     fs.delete(root);
@@ -168,7 +167,6 @@ public class TestRestartDFS extends TestCase {
     MiniDFSCluster cluster = null;
     MyFile[] files = null;
     try {
-      Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(65314, conf, 4, false);
       namenode = conf.get("fs.default.name", "local");
       if (!"local".equals(namenode)) {
@@ -179,7 +177,6 @@ public class TestRestartDFS extends TestCase {
     }
     assertTrue("Error creating files", files != null);
     try {
-      Configuration conf = new Configuration();
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster(65320, conf, 4, false, false);
       namenode = conf.get("fs.default.name", "local");

+ 7 - 3
src/webapps/datanode/browseBlock.jsp

@@ -8,6 +8,7 @@
   import="org.apache.hadoop.dfs.*"
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
+  import="org.apache.hadoop.net.DNS"
   import="java.text.DateFormat"
 %>
 
@@ -67,7 +68,8 @@
     blockSize = Long.parseLong(blockSizeStr);
 
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
-    LocatedBlock[] blocks = dfs.namenode.open(filename);
+    LocatedBlock[] blocks = dfs.namenode.open(
+        DNS.getDefaultHost("default"), filename);
     //Add the various links for looking at the file contents
     //URL for downloading the full file
     String downloadUrl = "http://" + req.getServerName() + ":" +
@@ -229,7 +231,8 @@
     //determine data for the next link
     if (startOffset + chunkSizeToView >= blockSize) {
       //we have to go to the next block from this point onwards
-      LocatedBlock[] blocks = dfs.namenode.open(filename);
+      LocatedBlock[] blocks = dfs.namenode.open(
+           DNS.getDefaultHost("default"), filename);
       for (int i = 0; i < blocks.length; i++) {
         if (blocks[i].getBlock().getBlockId() == blockId) {
           if (i != blocks.length - 1) {
@@ -276,7 +279,8 @@
     int prevPort = req.getServerPort();
     int prevDatanodePort = datanodePort;
     if (startOffset == 0) {
-      LocatedBlock [] blocks = dfs.namenode.open(filename);
+      LocatedBlock [] blocks = dfs.namenode.open(
+              DNS.getDefaultHost("default"), filename);
       for (int i = 0; i < blocks.length; i++) {
         if (blocks[i].getBlock().getBlockId() == blockId) {
           if (i != 0) {

+ 3 - 1
src/webapps/datanode/browseDirectory.jsp

@@ -8,6 +8,7 @@
   import="org.apache.hadoop.dfs.*"
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
+  import="org.apache.hadoop.net.DNS"
   import="java.text.DateFormat"
 %>
 <%!
@@ -54,7 +55,8 @@
       //Get the location of the first block of the file
       if (files[i].getPath().endsWith(".crc")) continue;
       if (!files[i].isDir()) {
-        LocatedBlock[] blocks = dfs.namenode.open(files[i].getPath());
+        LocatedBlock[] blocks = dfs.namenode.open(
+            DNS.getDefaultHost("default"), files[i].getPath());
         DatanodeInfo [] locations = blocks[0].getLocations();
         if (locations.length == 0) {
           cols[0] = files[i].getPath();

+ 3 - 1
src/webapps/datanode/tail.jsp

@@ -8,6 +8,7 @@
   import="org.apache.hadoop.dfs.*"
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
+  import="org.apache.hadoop.net.DNS"
   import="java.text.DateFormat"
 %>
 
@@ -54,7 +55,8 @@
     //fetch the block from the datanode that has the last block for this file
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, 
                                          jspHelper.conf);
-    LocatedBlock blocks[] = dfs.namenode.open(filename);
+    LocatedBlock blocks[] = dfs.namenode.open(
+        DNS.getDefaultHost("default"), filename); 
     if (blocks == null || blocks.length == 0) {
       out.print("No datanodes contain blocks of file "+filename);
       dfs.close();

+ 6 - 2
src/webapps/dfs/browseDirectory.jsp

@@ -9,6 +9,7 @@
   import="org.apache.hadoop.dfs.*"
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
+  import="org.apache.hadoop.net.DNS"
   import="java.text.DateFormat"
 %>
 <%!
@@ -32,7 +33,8 @@
     DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
     UTF8 target = new UTF8(dir);
     if( !dfs.isDirectory(target) ) { // a file
-      LocatedBlock[] blocks = dfs.namenode.open(dir);
+      LocatedBlock[] blocks = dfs.namenode.open(
+          DNS.getDefaultHost("default"), dir);
       DatanodeInfo [] locations = blocks[0].getLocations();
       if (locations.length == 0) {
         out.print("Empty file");
@@ -86,7 +88,9 @@
       //Get the location of the first block of the file
       if (files[i].getPath().endsWith(".crc")) continue;
       if (!files[i].isDir()) {
-        LocatedBlock[] blocks = dfs.namenode.open(files[i].getPath());
+        LocatedBlock[] blocks = dfs.namenode.open(
+            DNS.getDefaultHost("default"), files[i].getPath());
+
         DatanodeInfo [] locations = blocks[0].getLocations();
         if (locations.length == 0) {
           cols[0] = files[i].getName();