
HDFS-17167. Add config to startup NameNode as Observer (#6013)

dannytbecker, 1 year ago
Commit 07c8b69f7c

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -1177,6 +1177,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
   public static final String  DFS_WEB_UGI_KEY = "dfs.web.ugi";
   public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
+  public static final String  DFS_NAMENODE_OBSERVER_ENABLED_KEY = "dfs.namenode.observer.enabled";
+  public static final boolean DFS_NAMENODE_OBSERVER_ENABLED_DEFAULT = false;
   public static final String  DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
   public static final String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
       HdfsClientConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;

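The two constants above are consumed through Hadoop's standard Configuration API. A minimal sketch of how the flag is resolved (the class and its main method are illustrative only; the key and default are the ones added in this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative sketch: resolves the new flag the same way the NameNode does.
public class ObserverFlagCheck {
  public static void main(String[] args) {
    // HdfsConfiguration loads hdfs-default.xml plus any hdfs-site.xml overrides.
    Configuration conf = new HdfsConfiguration();
    boolean observerEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_DEFAULT);
    System.out.println("dfs.namenode.observer.enabled = " + observerEnabled);
  }
}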
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -435,7 +435,7 @@ public class BackupNode extends NameNode {
   }
 
   @Override
-  protected HAState createHAState(StartupOption startOpt) {
+  protected HAState createHAState(Configuration conf) {
     return new BackupState();
   }
 

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -163,6 +163,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_BIN
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
@@ -1125,7 +1127,7 @@ public class NameNode extends ReconfigurableBase implements
           + " this namenode/service.", clientNamenodeAddress);
     }
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
-    state = createHAState(getStartupOption(conf));
+    state = createHAState(conf);
     this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
     this.haContext = createHAContext();
     try {
@@ -1161,11 +1163,17 @@ public class NameNode extends ReconfigurableBase implements
     }
   }
 
-  protected HAState createHAState(StartupOption startOpt) {
+  protected HAState createHAState(Configuration conf) {
+    StartupOption startOpt = getStartupOption(conf);
     if (!haEnabled || startOpt == StartupOption.UPGRADE
         || startOpt == StartupOption.UPGRADEONLY) {
       return ACTIVE_STATE;
-    } else if (startOpt == StartupOption.OBSERVER) {
+    } else if (conf.getBoolean(DFS_NAMENODE_OBSERVER_ENABLED_KEY,
+          DFS_NAMENODE_OBSERVER_ENABLED_DEFAULT)
+        || startOpt == StartupOption.OBSERVER) {
+      // Set Observer state using config instead of startup option
+      // This allows other startup options to be used when starting observer.
+      // e.g. rollingUpgrade
       return OBSERVER_STATE;
     } else {
       return STANDBY_STATE;

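To make the new precedence explicit: non-HA and upgrade-related startup options still force ACTIVE, and only after that does either the config flag or the -observer startup option select OBSERVER. A simplified, standalone sketch of that ordering (the enum and method names are stand-ins, not the real NameNode types):

// Simplified sketch of the createHAState(Configuration) decision order.
class HAStateSketch {
  enum StartupOption { REGULAR, UPGRADE, UPGRADEONLY, OBSERVER, ROLLINGUPGRADE }
  enum State { ACTIVE, STANDBY, OBSERVER }

  static State pickState(boolean haEnabled, boolean observerEnabled,
      StartupOption startOpt) {
    if (!haEnabled || startOpt == StartupOption.UPGRADE
        || startOpt == StartupOption.UPGRADEONLY) {
      return State.ACTIVE;     // non-HA and upgrade paths win first
    }
    if (observerEnabled || startOpt == StartupOption.OBSERVER) {
      return State.OBSERVER;   // config flag OR the -observer option
    }
    return State.STANDBY;
  }
}

Because the flag, rather than the startup option, can now decide the role, an Observer can be brought up with other startup options such as rollingUpgrade, as the in-diff comment notes.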
+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2801,6 +2801,15 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.observer.enabled</name>
+  <value>false</value>
+  <description>
+    If set to true, the NameNode starts up as an Observer node;
+    otherwise, startup behavior is unchanged.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.enable.retrycache</name>
   <value>true</value>

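As a usage sketch mirroring the test added below (the helper method name is hypothetical; the MiniDFSCluster calls are the same ones the test uses), the flag can be flipped for a single NameNode and takes effect on restart:

// Hypothetical helper, intended to live inside a MiniDFSCluster-based test:
// enable the Observer flag on one NameNode and restart it so the role changes.
static void restartAsObserver(org.apache.hadoop.hdfs.MiniDFSCluster cluster,
    int nnIdx) throws Exception {
  cluster.getConfiguration(nnIdx).setBoolean(
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_KEY,
      true);
  cluster.restartNameNode(nnIdx);
  cluster.waitNameNodeUp(nnIdx);
}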
+ 60 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_OBSERVER_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider.*;
@@ -215,6 +216,65 @@ public class TestObserverNode {
     assertSentTo(0);
   }
 
+  @Test
+  public void testConfigStartup() throws Exception {
+    int nnIdx = dfsCluster.getNumNameNodes() - 1;
+
+    // Transition all current observers to standby
+    for (int i = 0; i < dfsCluster.getNumNameNodes(); i++) {
+      if (dfsCluster.getNameNode(i).isObserverState()) {
+        dfsCluster.transitionToStandby(i);
+      }
+    }
+
+    // Confirm that the namenode at nnIdx is standby
+    assertTrue("The NameNode is observer despite being transitioned to standby",
+        dfsCluster.getNameNode(nnIdx).isStandbyState());
+
+    // Restart the NameNode with observer startup option as false
+    dfsCluster.getConfiguration(nnIdx)
+        .setBoolean(DFS_NAMENODE_OBSERVER_ENABLED_KEY, false);
+    dfsCluster.restartNameNode(nnIdx);
+
+    // Verify that the NameNode is not in Observer state
+    dfsCluster.waitNameNodeUp(nnIdx);
+    assertTrue("The NameNode started as Observer despite "
+        + DFS_NAMENODE_OBSERVER_ENABLED_KEY + " being false",
+        dfsCluster.getNameNode(nnIdx).isStandbyState());
+
+    dfs.mkdir(testPath, FsPermission.getDefault());
+    assertSentTo(0);
+
+    // The first request goes to the active because it has not refreshed yet;
+    // the second would go to the observer if it was not in standby
+    dfsCluster.rollEditLogAndTail(0);
+    dfs.getFileStatus(testPath);
+    dfs.getFileStatus(testPath);
+    assertSentTo(0);
+
+    Path testPath2 = new Path(testPath, "test2");
+    // Restart the NameNode with the observer startup option as true
+    dfsCluster.getConfiguration(nnIdx)
+        .setBoolean(DFS_NAMENODE_OBSERVER_ENABLED_KEY, true);
+    dfsCluster.restartNameNode(nnIdx);
+
+    // Check that the NameNode is in Observer state
+    dfsCluster.waitNameNodeUp(nnIdx);
+    assertTrue("The NameNode did not start as Observer despite "
+        + DFS_NAMENODE_OBSERVER_ENABLED_KEY + " being true",
+        dfsCluster.getNameNode(nnIdx).isObserverState());
+
+    dfs.mkdir(testPath2, FsPermission.getDefault());
+    assertSentTo(0);
+
+    // The first request goes to the active because it has not refreshed yet;
+    // the second will properly go to the observer
+    dfsCluster.rollEditLogAndTail(0);
+    dfs.getFileStatus(testPath2);
+    dfs.getFileStatus(testPath2);
+    assertSentTo(nnIdx);
+  }
+
   @Test
   public void testFailover() throws Exception {
     Path testPath2 = new Path(testPath, "test2");