
HADOOP-5687. NameNode throws NPE if fs.default.name is the default value. Contributed by Philip Zeyliger.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@777321 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 16 years ago
parent
commit
dec99970b8

+ 3 - 0
CHANGES.txt

@@ -653,6 +653,9 @@ Trunk (unreleased changes)
     HADOOP-5782. Revert a few formatting changes introduced in HADOOP-5015.
     (Suresh Srinivas via rangadi)
 
+    HADOOP-5687. NameNode throws NPE if fs.default.name is the default value.
+    (Philip Zeyliger via shv)
+
 Release 0.20.1 - Unreleased
 
   INCOMPATIBLE CHANGES
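
For context, the failure this entry describes: the default fs.default.name is file:///, whose URI has no authority component, so NameNode.getAddress(conf) used to hand null to NetUtils.createSocketAddr, which dereferenced it. A minimal standalone sketch of that pre-patch mechanism (plain java.net.URI, not Hadoop code):

import java.net.URI;

// Sketch of the pre-patch failure mode: the default fs.default.name,
// "file:///", parses to a URI with no authority component.
public class DefaultFsNpeSketch {
  public static void main(String[] args) {
    URI defaultFs = URI.create("file:///");
    String authority = defaultFs.getAuthority();  // null for file:///
    System.out.println("authority = " + authority);
    // The old NameNode.getAddress(conf) passed this null into
    // NetUtils.createSocketAddr(target, port), where target.indexOf(':')
    // blew up with a NullPointerException.
    authority.indexOf(':');  // throws NullPointerException, as the old path did
  }
}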

+ 1 - 1
src/core/org/apache/hadoop/fs/FileSystem.java

@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ReflectionUtils;
  * implementation is DistributedFileSystem.
  *****************************************************************/
 public abstract class FileSystem extends Configured implements Closeable {
-  private static final String FS_DEFAULT_NAME_KEY = "fs.default.name";
+  public static final String FS_DEFAULT_NAME_KEY = "fs.default.name";
 
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
 

+ 3 - 0
src/core/org/apache/hadoop/net/NetUtils.java

@@ -132,6 +132,9 @@ public class NetUtils {
    */
   public static InetSocketAddress createSocketAddr(String target,
                                                    int defaultPort) {
+    if (target == null) {
+      throw new IllegalArgumentException("Target address cannot be null.");
+    }
     int colonIndex = target.indexOf(':');
     if (colonIndex < 0 && defaultPort == -1) {
       throw new RuntimeException("Not a host:port pair: " + target);
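
With this guard, a null target fails fast with an actionable message instead of an NPE inside indexOf. A hedged usage sketch (assumes the patched hadoop-core jar on the classpath; the hostname is a placeholder):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

// Sketch: exercising the new null check in createSocketAddr.
public class CreateSocketAddrSketch {
  public static void main(String[] args) {
    InetSocketAddress ok =
        NetUtils.createSocketAddr("namenode.example.com:8020", 8020);
    System.out.println(ok);

    try {
      NetUtils.createSocketAddr(null, 8020);
    } catch (IllegalArgumentException e) {
      System.out.println("caught: " + e.getMessage());  // "Target address cannot be null."
    }
  }
}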

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -80,7 +80,7 @@ public class DistributedFileSystem extends FileSystem {
 
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = URI.create("hdfs://" + uri.getAuthority());
+    this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 

+ 5 - 0
src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -76,6 +76,11 @@ public interface FSConstants {
     FORCE_PROCEED;
   }
 
+  /**
+   * URI Scheme for hdfs://namenode/ URIs.
+   */
+  public static final String HDFS_URI_SCHEME = "hdfs";
+
   // Version is reflected in the dfs image and edit log files.
   // Version is reflected in the data storage file.
   // Versions are negative.

+ 17 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -189,13 +189,28 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
   }
 
   public static InetSocketAddress getAddress(Configuration conf) {
-    return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
+    URI filesystemURI = FileSystem.getDefaultUri(conf);
+    String authority = filesystemURI.getAuthority();
+    if (authority == null) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s has no authority.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
+    }
+    if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+        filesystemURI.getScheme())) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
+          FSConstants.HDFS_URI_SCHEME));
+    }
+    return getAddress(authority);
   }
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
     String portString = port == DEFAULT_PORT ? "" : (":"+port);
-    return URI.create("hdfs://"+ namenode.getHostName()+portString);
+    return URI.create(FSConstants.HDFS_URI_SCHEME + "://" 
+        + namenode.getHostName()+portString);
   }
 
   /**
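
The two checks correspond to the two misconfigurations users actually hit: an authority-less URI (the file:/// default) and a non-hdfs scheme. A standalone sketch of the same validation logic, with string literals standing in for the constants the real code references (FileSystem.FS_DEFAULT_NAME_KEY and FSConstants.HDFS_URI_SCHEME):

import java.net.URI;

// Standalone sketch of the validations added to NameNode.getAddress;
// string literals stand in for the Hadoop constants.
public class NameNodeUriCheckSketch {
  static void check(URI filesystemURI) {
    if (filesystemURI.getAuthority() == null) {
      throw new IllegalArgumentException(String.format(
          "Invalid URI for NameNode address (check %s): %s has no authority.",
          "fs.default.name", filesystemURI));
    }
    if (!"hdfs".equalsIgnoreCase(filesystemURI.getScheme())) {
      throw new IllegalArgumentException(String.format(
          "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
          "fs.default.name", filesystemURI, "hdfs"));
    }
  }

  public static void main(String[] args) {
    check(URI.create("hdfs://namenode.example.com:8020"));  // passes
    check(URI.create("file:///"));  // throws: "... has no authority."
  }
}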

+ 2 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
@@ -276,7 +277,7 @@ public class SecondaryNameNode implements Runnable {
    */
   private String getInfoServer() throws IOException {
     URI fsName = FileSystem.getDefaultUri(conf);
-    if (!"hdfs".equals(fsName.getScheme())) {
+    if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
     return conf.get("dfs.http.address", "0.0.0.0:50070");
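
The switch to equalsIgnoreCase also relaxes the scheme comparison: URI schemes are case-insensitive per RFC 3986, while java.net.URI preserves the case it was given. A small sketch of the difference:

import java.net.URI;

// Sketch: why the scheme check uses equalsIgnoreCase.
public class SchemeCheckSketch {
  public static void main(String[] args) {
    URI fsName = URI.create("HDFS://namenode.example.com:8020");
    System.out.println("hdfs".equals(fsName.getScheme()));            // false (old check)
    System.out.println("hdfs".equalsIgnoreCase(fsName.getScheme()));  // true  (new check)
  }
}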