
HDFS-599. Allow NameNode to have a separate port for service requests from client requests. Contributed by Dmytro Molkov.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@956141 13f79535-47bb-0310-9956-ffa450edef68
Hairong Kuang 15 years ago
parent
commit
442d0b6433

+ 3 - 0
CHANGES.txt

@@ -7,6 +7,9 @@ Trunk (unreleased changes)
     HDFS-992. Re-factor block access token implementation to conform to the 
     generic Token interface in Common (Kan Zhang and Jitendra Pandey via jghoman)
 
+    HDFS-599. Allow NameNode to have a separate port for service requests from
+    client requests. (Dmytro Molkov via hairong)
+
   IMPROVEMENTS
 
     HDFS-1110. Reuses objects for commonly used file names in namenode to

+ 4 - 0
src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -43,10 +43,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  public static final String  DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
   public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
   public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
@@ -164,6 +166,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+  public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
+  public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
   public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
   public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
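As a rough illustration of how these new keys fit together (not part of the commit; the hostname and port numbers below are placeholder values), a deployment would point dfs.namenode.servicerpc-address at a second NameNode port while clients keep using the default filesystem URI:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ServiceRpcConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Clients keep talking to the address in the default filesystem URI.
    FileSystem.setDefaultUri(conf, "hdfs://nn.example.com:8020");
    // DataNodes, the BackupNode and other services move to this port once it
    // is set; the key ships with no default, so the second port is opt-in.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "nn.example.com:8021");
    // The service endpoint gets its own handler pool (default 10, per above).
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, 20);
  }
}

In production the same two settings would normally live in hdfs-site.xml rather than be set programmatically.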

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -909,7 +909,7 @@ public class Balancer implements Tool {
    * set up the retry policy */ 
   private static NamenodeProtocol createNamenode(Configuration conf)
     throws IOException {
-    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
+    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);
     RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
         5, 200, TimeUnit.MILLISECONDS);
     Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =

+ 3 - 3
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -233,7 +233,7 @@ public class DataNode extends Configured
            final AbstractList<File> dataDirs) throws IOException {
     this(conf, dataDirs, (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
                        DatanodeProtocol.versionID,
-                       NameNode.getAddress(conf), 
+                       NameNode.getServiceAddress(conf, true), 
                        conf));
   }
   
@@ -284,7 +284,7 @@ public class DataNode extends Configured
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
                                      conf.get("dfs.datanode.dns.nameserver","default"));
     }
     }
-    this.nameNodeAddr = NameNode.getAddress(conf);
+    this.nameNodeAddr = NameNode.getServiceAddress(conf, true);
     
     this.socketTimeout =  conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
                                       HdfsConstants.READ_TIMEOUT);
@@ -305,7 +305,7 @@ public class DataNode extends Configured
 
     // connect to name node
     this.namenode = namenode;
-    
+
     // get version and id info from the name-node
     NamespaceInfo nsInfo = handshake();
     StartupOption startOpt = getStartupOption(conf);

+ 18 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -56,6 +56,7 @@ public class BackupNode extends NameNode {
   private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
   private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
   private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
+  private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
 
   /** Name-node proxy */
   NamenodeProtocol namenode;
@@ -80,11 +81,27 @@ public class BackupNode extends NameNode {
     String hostName = DNS.getDefaultHost("default");
     return new InetSocketAddress(hostName, port);
   }
+  
+  @Override
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throws IOException {
+    String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return null;
+    }
+    int port = NetUtils.createSocketAddr(addr).getPort();
+    String hostName = DNS.getDefaultHost("default");
+    return new InetSocketAddress(hostName, port);
+  }
 
   @Override // NameNode
   protected void setRpcServerAddress(Configuration conf) {
     conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(rpcAddress));
   }
+  
+  @Override // NameNode
+  protected void setRpcServiceServerAddress(Configuration conf) {
+    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(serviceRPCAddress));
+  }
 
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
@@ -229,7 +246,7 @@ public class BackupNode extends NameNode {
 
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
-    InetSocketAddress nnAddress = super.getRpcServerAddress(conf);
+    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode =
       (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
           NamenodeProtocol.versionID, nnAddress, conf);
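A hedged sketch of the BackupNode side (host/port values are placeholders, and DFS_NAMENODE_BACKUP_ADDRESS_KEY is the pre-existing backup address key, not something this commit adds): dfs.namenode.backup.dnrpc-address is optional, and as getServiceRpcServerAddress above shows, only the port of the configured value is used while the host is re-resolved through DNS.getDefaultHost("default").

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BackupNodeServiceRpcSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Regular BackupNode RPC endpoint (placeholder values).
    conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "backup.example.com:50100");
    // Optional service RPC endpoint; when this key is unset,
    // getServiceRpcServerAddress returns null and no extra server is bound.
    conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY, "backup.example.com:50101");
  }
}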

+ 71 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -136,7 +136,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   }
   
   public long getProtocolVersion(String protocol, 
-                                 long clientVersion) throws IOException { 
+                                 long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
       return ClientProtocol.versionID; 
     } else if (protocol.equals(DatanodeProtocol.class.getName())){
@@ -159,10 +159,18 @@ public class NameNode implements NamenodeProtocols, FSConstants {
 
   protected FSNamesystem namesystem; 
   protected NamenodeRole role;
-  /** RPC server */
+  /** RPC server. */
   protected Server server;
+  /** RPC server for HDFS service communication.
+      The BackupNode, DataNodes and all other services
+      should connect to this server if it is
+      configured. Clients should only go to NameNode#server.
+  */
+  protected Server serviceRpcServer;
   /** RPC server address */
   protected InetSocketAddress rpcAddress = null;
+  /** RPC server for DN address */
+  protected InetSocketAddress serviceRPCAddress = null;
   /** httpServer */
   protected HttpServer httpServer;
   /** HTTP server address */
@@ -204,6 +212,32 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
+  /**
+   * Set the configuration property for the service rpc address
+   * to address
+   */
+  public static void setServiceAddress(Configuration conf,
+                                           String address) {
+    LOG.info("Setting ADDRESS " + address);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+  }
+  
+  /**
+   * Fetches the address that services should use to connect to the
+   * namenode. If the service-specific address is not configured, this
+   * returns null, or, when fallback is true, the default namenode
+   * address used by both clients and services.
+   * Services here are datanodes, the backup node, and any other
+   * non-client connection.
+   */
+  public static InetSocketAddress getServiceAddress(Configuration conf,
+                                                        boolean fallback) {
+    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return fallback ? getAddress(conf) : null;
+    }
+    return getAddress(addr);
+  }
+
   public static InetSocketAddress getAddress(Configuration conf) {
     URI filesystemURI = FileSystem.getDefaultUri(conf);
     String authority = filesystemURI.getAuthority();
@@ -247,9 +281,25 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     return role.equals(that);
   }
 
+  /**
+   * Given a configuration, get the address of the service rpc server.
+   * If the service rpc address is not configured, returns null.
+   */
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
+    throws IOException {
+    return NameNode.getServiceAddress(conf, false);
+  }
+
   protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
     return getAddress(conf);
   }
+  
+  /**
+   * Modifies the configuration passed to contain the service rpc address setting
+   */
+  protected void setRpcServiceServerAddress(Configuration conf) {
+    setServiceAddress(conf, getHostPortString(serviceRPCAddress));
+  }
 
   protected void setRpcServerAddress(Configuration conf) {
     FileSystem.setDefaultUri(conf, getUri(rpcAddress));
@@ -298,7 +348,18 @@ public class NameNode implements NamenodeProtocols, FSConstants {
 
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
-    // create rpc server 
+    // create rpc server
+    InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
+    if (dnSocketAddr != null) {
+      int serviceHandlerCount =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
+          dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
+          false, conf, namesystem.getDelegationTokenSecretManager());
+      this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+      setRpcServiceServerAddress(conf);
+    }
     this.server = RPC.getServer(NamenodeProtocols.class, this,
                                 socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf, 
@@ -309,6 +370,9 @@ public class NameNode implements NamenodeProtocols, FSConstants {
 
     activate(conf);
     LOG.info(getRole() + " up at: " + rpcAddress);
+    if (serviceRPCAddress != null) {
+      LOG.info(getRole() + " service server is up at: " + serviceRPCAddress); 
+    }
   }
 
   /**
@@ -322,6 +386,9 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     namesystem.activate(conf);
     startHttpServer(conf);
     server.start();  //start RPC server
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();      
+    }
     startTrashEmptier(conf);
     
     plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
@@ -471,6 +538,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     if(namesystem != null) namesystem.close();
     if(emptier != null) emptier.interrupt();
     if(server != null) server.stop();
+    if(serviceRpcServer != null) serviceRpcServer.stop();
     if (myMetrics != null) {
       myMetrics.shutdown();
     }
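To make the fallback semantics of getServiceAddress concrete, a small sketch against the API added above (hostnames and ports are placeholder values): with no service address configured, services degrade to the regular client address, which is what keeps existing single-port deployments working unchanged.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class ServiceAddressFallbackSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "hdfs://nn.example.com:8020");

    // Nothing configured: fallback=true yields the client address,
    // fallback=false signals that no dedicated service port exists.
    InetSocketAddress svc = NameNode.getServiceAddress(conf, true);
    System.out.println(svc);                                      // nn.example.com:8020
    System.out.println(NameNode.getServiceAddress(conf, false)); // null

    // Once set, services get the dedicated port; clients are unaffected.
    NameNode.setServiceAddress(conf, "nn.example.com:8021");
    System.out.println(NameNode.getServiceAddress(conf, true));  // nn.example.com:8021
  }
}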

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -129,7 +129,7 @@ public class SecondaryNameNode implements Runnable {
     
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr = NameNode.getAddress(conf);
+    nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
     this.namenode =

+ 26 - 5
src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -35,15 +35,26 @@ import org.apache.log4j.Level;
 public class TestDistributedFileSystem extends junit.framework.TestCase {
   private static final Random RAN = new Random();
 
+  private boolean dualPortTesting = false;
+  
+  private HdfsConfiguration getTestConfiguration() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    if (dualPortTesting) {
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+              "localhost:0");
+    }
+    return conf;
+  }
+
   public void testFileSystemCloseAll() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
       FileSystem.closeAll();
 
-      conf = new HdfsConfiguration();
+      conf = getTestConfiguration();
       FileSystem.setDefaultUri(conf, address);
       FileSystem.get(conf);
       FileSystem.get(conf);
@@ -59,7 +70,7 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
    * multiple files are open.
    */
   public void testDFSClose() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -76,7 +87,7 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
   }
 
   public void testDFSClient() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = null;
 
     try {
@@ -165,7 +176,7 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
     System.out.println("seed=" + seed);
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
     RAN.setSeed(seed);
 
 
-    final Configuration conf = new HdfsConfiguration();
+    final Configuration conf = getTestConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -228,5 +239,15 @@ public class TestDistributedFileSystem extends junit.framework.TestCase {
         assertEquals(qfoocs, barcs);
       }
     }
+    cluster.shutdown();
+  }
+  
+  public void testAllWithDualPort() throws Exception {
+    dualPortTesting = true;
+
+    testFileSystemCloseAll();
+    testDFSClose();
+    testDFSClient();
+    testFileChecksum();
   }
 }

+ 27 - 5
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java

@@ -83,10 +83,13 @@ public class TestHDFSServerPorts extends TestCase {
     return System.getProperty("test.build.data", "build/test/data");
   }
   
+  public NameNode startNameNode() throws IOException {
+    return startNameNode(false);
+  }
   /**
    * Start the namenode.
    */
-  public NameNode startNameNode() throws IOException {
+  public NameNode startNameNode(boolean withService) throws IOException {
     String dataDir = getTestingDir();
     hdfsDir = new File(dataDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
@@ -96,6 +99,9 @@ public class TestHDFSServerPorts extends TestCase {
     config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name1")).toString());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+    if (withService) {
+      NameNode.setServiceAddress(config, NAME_NODE_HOST + "0");      
+    }
     config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
     NameNode.format(config);
 
@@ -239,13 +245,17 @@ public class TestHDFSServerPorts extends TestCase {
     return true;
   }
 
+  public void testNameNodePorts() throws Exception {
+    runTestNameNodePorts(false);
+    runTestNameNodePorts(true);
+  }
   /**
    * Verify namenode port usage.
    */
-  public void testNameNodePorts() throws Exception {
+  public void runTestNameNodePorts(boolean withService) throws Exception {
     NameNode nn = null;
     try {
-      nn = startNameNode();
+      nn = startNameNode(withService);
 
       // start another namenode on the same port
       Configuration conf2 = new HdfsConfiguration(config);
@@ -265,7 +275,18 @@ public class TestHDFSServerPorts extends TestCase {
       // different http port
       conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       started = canStartNameNode(conf2);
-      assertTrue(started); // should start now
+
+      if (withService) {
+        assertFalse("Should've failed on service port", started);
+
+        // reset conf2 since NameNode modifies it
+        FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+        // Set Service address      
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NAME_NODE_HOST + "0");
+        started = canStartNameNode(conf2);        
+      }
+      assertTrue(started);
     } finally {
       stopNameNode(nn);
     }
@@ -358,7 +379,8 @@ public class TestHDFSServerPorts extends TestCase {
         LOG.info("= Starting 2 on: " + 
         LOG.info("= Starting 2 on: " + 
                                   backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
                                   backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
 
-        assertTrue(canStartBackupNode(backup_config)); // should start now
+        boolean started = canStartBackupNode(backup_config);
+        assertTrue("Backup Namenode should've started", started); // should start now
       } finally {
         stopNameNode(nn);
       }

+ 27 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java

@@ -29,9 +29,7 @@ import org.apache.hadoop.fs.Path;
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
 public class TestRestartDFS extends TestCase {
-  /** check if DFS remains in proper condition after a restart */
-  public void testRestartDFS() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
+  public void runTests(Configuration conf, boolean serviceTest) throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
 
@@ -44,6 +42,10 @@ public class TestRestartDFS extends TestCase {
     FileStatus dirstatus;
 
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, dir);
@@ -58,8 +60,12 @@ public class TestRestartDFS extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       // Here we restart the MiniDFScluster without formatting namenode
-      cluster = new MiniDFSCluster(conf, 4, false, null);
+      cluster = new MiniDFSCluster(conf, 4, false, null); 
       FileSystem fs = cluster.getFileSystem();
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, dir));
@@ -77,6 +83,10 @@ public class TestRestartDFS extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       // This is a second restart to check that after the first restart
       // the image written in parallel to both places did not get corrupted
       cluster = new MiniDFSCluster(conf, 4, false, null);
@@ -98,4 +108,17 @@ public class TestRestartDFS extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+  /** check if DFS remains in proper condition after a restart */
+  public void testRestartDFS() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    runTests(conf, false);
+  }
+  
+  /** check if DFS remains in proper condition after a restart 
+   * this rerun is with 2 ports enabled for RPC in the namenode
+   */
+   public void testRestartDualPortDFS() throws Exception {
+     final Configuration conf = new HdfsConfiguration();
+     runTests(conf, true);
+   }
 }