
AMBARI-15790 Clean up stack scripts that refer to dfs.nameservices to use dfs.internal.nameservices as first option (dsen)

Dmytro Sen 9 years ago
parent
commit
02627054ed
24 changed files with 525 additions and 26 deletions
  1. +1 -1
      ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
  2. +1 -1
      ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java
  3. +3 -3
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
  4. +1 -1
      ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
  5. +1 -1
      ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java
  6. +2 -1
      ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
  7. +3 -1
      ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
  8. +1 -1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
  9. +1 -1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
  10. +5 -2
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
  11. +3 -1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  12. +1 -1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
  13. +3 -1
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
  14. +4 -1
      ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
  15. +1 -1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/alert_ha_namenode_health.py
  16. +4 -1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
  17. +3 -1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  18. +3 -1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
  19. +1 -1
      ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java
  20. +1 -1
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
  21. +113 -0
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  22. +2 -2
      ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
  23. +366 -0
      ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
  24. +1 -1
      ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
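
The recurring change across the Python stack scripts below is a two-step lookup: read dfs.internal.nameservices first and fall back to dfs.nameservices only when the internal property is absent. Under HDFS federation, dfs.nameservices may also list nameservices served by remote clusters, so scripts that need the local nameservice prefer the internal property. A minimal sketch of that precedence, using a plain dict in place of Ambari's config object (the sample values are illustrative, not taken from this commit):

    def lookup_nameservice(hdfs_site):
        """Return the local nameservice id, preferring the internal property."""
        for key in ('dfs.internal.nameservices', 'dfs.nameservices'):
            value = hdfs_site.get(key)
            if value is not None:
                return value
        return None

    hdfs_site = {'dfs.nameservices': 'ns1,remote-ns'}  # may include remote nameservices
    assert lookup_nameservice(hdfs_site) == 'ns1,remote-ns'   # fallback path
    hdfs_site['dfs.internal.nameservices'] = 'ns1'     # only what this cluster serves
    assert lookup_nameservice(hdfs_site) == 'ns1'      # internal property wins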

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java

@@ -115,7 +115,7 @@ public enum CheckDescription {
       "NameNode High Availability must be enabled",
       new HashMap<String, String>() {{
         put(AbstractCheckDescriptor.DEFAULT,
-          "NameNode High Availability is not enabled. Verify that dfs.nameservices property is present in hdfs-site.xml.");
+          "NameNode High Availability is not enabled. Verify that dfs.internal.nameservices property is present in hdfs-site.xml.");
       }}),
 
   SERVICES_NAMENODE_TRUNCATE(PrereqCheckType.SERVICE,

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java

@@ -58,7 +58,7 @@ public class ServicesNamenodeHighAvailabilityCheck extends AbstractCheckDescript
     final Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
     final DesiredConfig desiredConfig = desiredConfigs.get(configType);
     final Config config = cluster.getConfig(configType, desiredConfig.getTag());
-    if (!config.getProperties().containsKey("dfs.nameservices")) {
+    if (!config.getProperties().containsKey("dfs.internal.nameservices")) {
       prerequisiteCheck.getFailedOn().add("HDFS");
       prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
       prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));

+ 3 - 3
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java

@@ -602,7 +602,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
    */
   String[] getPortProperties(Service.Type service, String componentName, String hostName, Map<String, Object> properties, boolean httpsEnabled) {
     componentName = httpsEnabled ? componentName + "-HTTPS" : componentName;
-    if(componentName.startsWith("NAMENODE") && properties.containsKey("dfs.nameservices")) {
+    if(componentName.startsWith("NAMENODE") && properties.containsKey("dfs.internal.nameservices")) {
       componentName += "-HA";
       return getNamenodeHaProperty(properties, serviceDesiredProperties.get(service).get(componentName), hostName);
     }
@@ -611,7 +611,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
 
   private String[] getNamenodeHaProperty(Map<String, Object> properties, String pattern[], String hostName) {
     // iterate over nameservices and namenodes, to find out namenode http(s) property for concrete host
-    for(String nameserviceId : ((String)properties.get("dfs.nameservices")).split(",")) {
+    for(String nameserviceId : ((String)properties.get("dfs.internal.nameservices")).split(",")) {
       if(properties.containsKey("dfs.ha.namenodes."+nameserviceId)) {
         for (String namenodeId : ((String)properties.get("dfs.ha.namenodes." + nameserviceId)).split(",")) {
           String propertyName = String.format(
@@ -1235,7 +1235,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
           configVersion,
           serviceConfigTypes.get(componentServiceMap.get(componentName))
         );
-        if (configProperties.containsKey("dfs.nameservices")) {
+        if (configProperties.containsKey("dfs.internal.nameservices")) {
           componentName += "-HA";
           keys = jmxDesiredRpcSuffixProperties.get(componentName);
           Map<String, String[]> stringMap = jmxDesiredRpcSuffixProperties.get(componentName);

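The Java change above resolves a concrete host's NameNode address by walking nameservice -> dfs.ha.namenodes.&lt;ns&gt; -> per-namenode address property. A hedged Python rendering of that walk (the property-name patterns follow hdfs-site conventions, but the dict and hostnames here are illustrative; the real code reads patterns from serviceDesiredProperties):

    def resolve_nn_http_address(props, host):
        """Find the NameNode http address whose hostname matches `host`."""
        nameservices = props.get('dfs.internal.nameservices',
                                 props.get('dfs.nameservices', ''))
        for ns in filter(None, nameservices.split(',')):
            nn_ids = props.get('dfs.ha.namenodes.%s' % ns, '')
            for nn_id in filter(None, nn_ids.split(',')):
                addr = props.get('dfs.namenode.http-address.%s.%s' % (ns, nn_id))
                if addr and addr.split(':')[0] == host:
                    return addr
        return None

    props = {
        'dfs.internal.nameservices': 'ns1',
        'dfs.ha.namenodes.ns1': 'nn1,nn2',
        'dfs.namenode.http-address.ns1.nn1': 'c6401.ambari.apache.org:50070',
        'dfs.namenode.http-address.ns1.nn2': 'c6402.ambari.apache.org:50070',
    }
    assert resolve_nn_http_address(props, 'c6402.ambari.apache.org') \
        == 'c6402.ambari.apache.org:50070'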
+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java

@@ -258,7 +258,7 @@ public class MasterHostResolver {
     Map<Status, String> stateToHost = new HashMap<Status, String>();
     Cluster cluster = getCluster();
 
-    String nameService = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.nameservices");
+    String nameService = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.internal.nameservices");
     if (nameService == null || nameService.isEmpty()) {
       return null;
     }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java

@@ -187,7 +187,7 @@ public class AlertUri {
    *
    * <pre>
    * high_availability": {
-   *   "nameservice": "{{hdfs-site/dfs.nameservices}}",
+   *   "nameservice": "{{hdfs-site/dfs.internal.nameservices}}",
    *   "alias_key" : "dfs.ha.namenodes.{{ha-nameservice}}",
    *   "http_pattern" : "dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}",
    *   "https_pattern" : "dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}"

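For readers following the alert definitions, the placeholders in this javadoc expand in sequence: the nameservice value fills {{ha-nameservice}}, and each alias read from the alias_key property fills {{alias}}. A small illustrative sketch (the nameservice and aliases are assumed sample values, not read from a real cluster):

    nameservice = 'ns1'                    # from {{hdfs-site/dfs.internal.nameservices}}
    aliases = 'nn1,nn2'.split(',')         # from dfs.ha.namenodes.ns1
    for alias in aliases:
        print('dfs.namenode.http-address.%s.%s' % (nameservice, alias))
    # -> dfs.namenode.http-address.ns1.nn1
    # -> dfs.namenode.http-address.ns1.nn2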
+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java

@@ -190,7 +190,8 @@ public class ClusterTopologyImpl implements ClusterTopology {
 
   public static boolean isNameNodeHAEnabled(Map<String, Map<String, String>> configurationProperties) {
     return configurationProperties.containsKey("hdfs-site") &&
-        configurationProperties.get("hdfs-site").containsKey("dfs.nameservices");
+           (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") ||
+            configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices"));
   }
 
   @Override

+ 3 - 1
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py

@@ -65,7 +65,9 @@ security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-dfs_nameservice = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_nameservice = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_nameservice is None:
+ dfs_nameservice = default('/configurations/hdfs-site/dfs.nameservices', None)
 
 # HDFSResource partial function
 HdfsResource = functools.partial(HdfsResource,

+ 1 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py

@@ -36,7 +36,7 @@ HDFS_NN_STATE_ACTIVE = 'active'
 HDFS_NN_STATE_STANDBY = 'standby'
 
 HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
 NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
 NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
 DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'

+ 1 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py

@@ -42,7 +42,7 @@ HDFS_NN_STATE_ACTIVE = 'active'
 HDFS_NN_STATE_STANDBY = 'standby'
 
 HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
 NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
 NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
 DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'

+ 5 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py

@@ -41,9 +41,12 @@ class NamenodeHAState:
     """
     import params
 
-    self.name_service = default("/configurations/hdfs-site/dfs.nameservices", None)
+    self.name_service = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+    if self.name_service is None:
+      self.name_service = default('/configurations/hdfs-site/dfs.nameservices', None)
+
     if not self.name_service:
-      raise ValueError("Could not retrieve property dfs.nameservices")
+      raise ValueError("Could not retrieve property dfs.nameservices or dfs.internal.nameservices")
 
     nn_unique_ids_key = "dfs.ha.namenodes." + str(self.name_service)
     # List of the nn unique ids

+ 3 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -258,7 +258,9 @@ data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hi
 
 # HDFS High Availability properties
 dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
 

+ 1 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py

@@ -42,7 +42,7 @@ hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 # HDFS High Availability properties
 dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 
 namenode_id = None

+ 3 - 1
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py

@@ -100,7 +100,9 @@ stack_version_unformatted = config['hostLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 
 namenode_rpc = None

+ 4 - 1
ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py

@@ -65,7 +65,10 @@ is_hive_installed = default("/clusterHostInfo/hive_server_host", None) is not No
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 namenode_path =  default('/configurations/hdfs-site/dfs.namenode.http-address', None)
-dfs_nameservice = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_nameservice = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_nameservice is None:
+  dfs_nameservice = default('/configurations/hdfs-site/dfs.nameservices', None)
+
 if dfs_nameservice:
   namenode_path =  get_active_namenode(hdfs_site, security_enabled, hdfs_user)[1]
 

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/alert_ha_namenode_health.py

@@ -34,7 +34,7 @@ HDFS_NN_STATE_ACTIVE = 'active'
 HDFS_NN_STATE_STANDBY = 'standby'
 
 HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.internal.nameservices}}'
 NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
 NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
 DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'

+ 4 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py

@@ -149,7 +149,10 @@ dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
 
 # HDFS High Availability properties
 dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 
 namenode_id = None

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -251,7 +251,9 @@ namenode_id = None
 namenode_rpc = None
 
 dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 
 dfs_ha_namemodes_ids_list = []

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py

@@ -294,7 +294,9 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     # Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address
     hdfsSiteProperties = getServicesSiteProperties(services, "hdfs-site")
     nameServices = None
-    if hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
+    if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:
+      nameServices = hdfsSiteProperties['dfs.internal.nameservices']
+    if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
       nameServices = hdfsSiteProperties['dfs.nameservices']
     if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties:
       namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices]

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java

@@ -96,7 +96,7 @@ public class ServicesNamenodeHighAvailabilityCheckTest {
     servicesNamenodeHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster"));
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
-    properties.put("dfs.nameservices", "anything");
+    properties.put("dfs.internal.nameservices", "anything");
     check = new PrerequisiteCheck(null, null);
     servicesNamenodeHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster"));
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java

@@ -362,7 +362,7 @@ public class JMXHostProviderTest {
 
     // Create configs
     Map<String, String> configs = new HashMap<String, String>();
-    configs.put("dfs.nameservices", "ns");
+    configs.put("dfs.internal.nameservices", "ns");
     configs.put("dfs.namenode.http-address", "h1:50070");
     configs.put("dfs.namenode.http-address.ns.nn1", "h1:50071");
     configs.put("dfs.namenode.http-address.ns.nn2", "h2:50072");

+ 113 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -1050,6 +1050,119 @@ class TestNamenode(RMFTestCase):
       call('hdfs namenode -bootstrapStandby -nonInteractive -force', logoutput=False, user=u'hdfs')]
     call_mocks.assert_has_calls(calls, any_order=True)
 
+  @patch.object(shell, "call")
+  def test_start_ha_bootstrap_standby_from_blueprint_initial_start_dfs_nameservices(self, call_mocks):
+
+    call_mocks = MagicMock()
+    call_mocks.side_effect = [(1, None), (0, None), (0, None)]
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="ha_bootstrap_standby_node_initial_start_dfs_nameservices.json",
+                       stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES,
+                       call_mocks = call_mocks
+    )
+    self.assert_configure_default()
+
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755
+    )
+
+    # TODO: Using shell.call() to bootstrap standby which is patched to return status code '5' (i.e. already bootstrapped)
+    # Need to update the test case to verify that the standby case is detected, and that the bootstrap
+    # command is run before the namenode launches
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              create_parents = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              create_parents = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid",
+                              )
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
+                              not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid",
+                              )
+    self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6402.ambari.apache.org:8020 -safemode get | grep 'Safe mode is OFF'",
+                              tries=115,
+                              try_sleep=10,
+                              user="hdfs",
+                              logoutput=True
+    )
+    self.assertResourceCalled('HdfsResource', '/tmp',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              keytab = UnknownConfigurationMock(),
+                              hadoop_bin_dir = '/usr/bin',
+                              default_fs = 'hdfs://ns1',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = None,
+                              user = 'hdfs',
+                              dfs_type = '',
+                              owner = 'hdfs',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 0777,
+                              )
+    self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              keytab = UnknownConfigurationMock(),
+                              hadoop_bin_dir = '/usr/bin',
+                              default_fs = 'hdfs://ns1',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = None,
+                              user = 'hdfs',
+                              dfs_type = '',
+                              owner = 'ambari-qa',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              mode = 0770,
+                              )
+    self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              keytab = UnknownConfigurationMock(),
+                              hadoop_bin_dir = '/usr/bin',
+                              default_fs = 'hdfs://ns1',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = None,
+                              user = 'hdfs',
+                              dfs_type = '',
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              )
+    self.assertNoMoreResources()
+    self.assertTrue(call_mocks.called)
+    self.assertEqual(3, call_mocks.call_count)
+    calls = [
+      call("ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'"),
+      call('hdfs namenode -bootstrapStandby -nonInteractive -force', logoutput=False, user=u'hdfs'),
+      call('hdfs namenode -bootstrapStandby -nonInteractive -force', logoutput=False, user=u'hdfs')]
+    call_mocks.assert_has_calls(calls, any_order=True)
+
   def test_decommission_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json

@@ -170,8 +170,8 @@
             "dfs.namenode.stale.datanode.interval": "30000", 
             "dfs.datanode.ipc.address": "0.0.0.0:8010", 
             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
-            "dfs.nameservices": "ns1", 
-            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.internal.nameservices": "ns1",
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
             "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470", 
             "dfs.webhdfs.enabled": "true", 
             "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470", 

Changes suppressed because the diff is too large
+ 366 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json


+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json

@@ -224,7 +224,7 @@
             "dfs.namenode.stale.datanode.interval": "30000", 
             "dfs.datanode.ipc.address": "0.0.0.0:8010", 
             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
-            "dfs.nameservices": "nn1", 
+            "dfs.internal.nameservices": "nn1",
             "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
             "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
             "dfs.webhdfs.enabled": "true", 

Some files were not shown because too many files changed in this diff