
HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException. Contributed by Xiao Chen.

cnauroth 9 years ago
parent
current commit
99cf2ecee9

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -2547,6 +2547,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9458. TestBackupNode always binds to port 50070, which can cause bind
     failures. (Xiao Chen via cnauroth)
 
+    HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
+    (Xiao Chen via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -1021,6 +1021,22 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpsAddress();
   }
 
+  /**
+   * Waits for the NameNodeHttpServer to finish. Used by unit tests to ensure a
+   * full shutdown, so that no bind exception is thrown during restart.
+   */
+  @VisibleForTesting
+  public void joinHttpServer() {
+    if (httpServer != null) {
+      try {
+        httpServer.join();
+      } catch (InterruptedException e) {
+        LOG.info("Caught InterruptedException joining NameNodeHttpServer", e);
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
   /**
    * Verify that configured directories exist, then
    * Interactively confirm that formatting is desired 
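For context, the intended use of the new method in tests is to let the HTTP server's threads exit completely before the same ports are bound again. A minimal sketch of that shutdown ordering follows; the fullyShutdown helper is illustrative and not part of the patch, while stop(), join(), and joinHttpServer() are the actual NameNode methods:

  // Hypothetical helper showing the full shutdown ordering before a restart.
  // Only stop(), join(), and joinHttpServer() come from NameNode; the helper
  // itself is not part of this patch.
  private static void fullyShutdown(NameNode nn) {
    if (nn == null) {
      return;
    }
    nn.stop();            // stop RPC and HTTP services
    nn.join();            // wait for the NameNode's own threads to exit
    nn.joinHttpServer();  // wait for the HTTP server so its port is released
  }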

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -194,6 +194,15 @@ public class NameNodeHttpServer {
     return params;
   }
 
+  /**
+   * Joins the http server, blocking until it has completely stopped.
+   */
+  public void join() throws InterruptedException {
+    if (httpServer != null) {
+      httpServer.join();
+    }
+  }
+
   void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();
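The new join() delegates to the underlying HTTP server and is expected to return only once that server has fully stopped, so callers should invoke stop() first. A sketch of the intended calling order; the helper is illustrative and would need to live in the same package, since stop() has package-private visibility:

  // Illustrative only: stop the server, then join to wait for its threads to
  // exit so the HTTP port is actually free for reuse.
  static void stopAndJoin(NameNodeHttpServer http) throws Exception {
    http.stop();   // trigger shutdown of the embedded web server
    http.join();   // block until shutdown has completed
  }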

+ 18 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -1908,12 +1908,7 @@ public class MiniDFSCluster {
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : namenodes.values()) {
       if (nnInfo == null) continue;
-      NameNode nameNode = nnInfo.nameNode;
-      if (nameNode != null) {
-        nameNode.stop();
-        nameNode.join();
-        nameNode = null;
-      }
+      stopAndJoinNameNode(nnInfo.nameNode);
     }
     ShutdownHookManager.get().clearShutdownHooks();
     if (base_dir != null) {
@@ -1953,17 +1948,25 @@ public class MiniDFSCluster {
    */
   public synchronized void shutdownNameNode(int nnIndex) {
     NameNodeInfo info = getNN(nnIndex);
-    NameNode nn = info.nameNode;
-    if (nn != null) {
-      LOG.info("Shutting down the namenode");
-      nn.stop();
-      nn.join();
-      info.nnId = null;
-      info.nameNode = null;
-      info.nameserviceId = null;
+    stopAndJoinNameNode(info.nameNode);
+    info.nnId = null;
+    info.nameNode = null;
+    info.nameserviceId = null;
+  }
+
+  /**
+   * Fully stop the NameNode by stopping it and then joining it and its HTTP server.
+   */
+  private void stopAndJoinNameNode(NameNode nn) {
+    if (nn == null) {
+      return;
     }
+    LOG.info("Shutting down the namenode");
+    nn.stop();
+    nn.join();
+    nn.joinHttpServer();
   }
-  
+
   /**
    * Restart all namenodes.
    */
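With shutdownNameNode() now joining the HTTP server as well, restarting the same NameNode index can rebind the ports it previously held. A minimal sketch of such a restart, assuming it runs inside a test method declared to throw Exception; the sequence is illustrative and not taken from this patch:

  // Illustrative restart sequence. MiniDFSCluster reuses the ports originally
  // assigned to the NameNode, so the shutdown must fully release them first.
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    cluster.shutdownNameNode(0);  // now also joins the NameNode's HTTP server
    cluster.restartNameNode(0);   // rebinds the same ports without a BindException
  } finally {
    cluster.shutdown();
  }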

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java

@@ -444,6 +444,7 @@ public class TestStartup {
     nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
+    namenode.joinHttpServer();
 
     // compress image using default codec
     LOG.info("Read an uncompressed image and store it compressed using default codec.");
@@ -474,6 +475,7 @@ public class TestStartup {
     nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
+    namenode.joinHttpServer();
   }
   
   @Test