
AMBARI-5544. Deleted slave components stick around from the service's perspective (dlysnichenko)

Lisnichenko Dmitro 10 years ago
parent
commit
f04b03941c
16 changed files with 1030 additions and 52 deletions
  1. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java (+44, -17)
  2. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java (+54, -0)
  3. ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py (+15, -14)
  4. ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py (+1, -1)
  5. ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py (+1, -1)
  6. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py (+1, -1)
  7. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py (+1, -1)
  8. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py (+15, -14)
  9. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py (+1, -1)
  10. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py (+1, -1)
  11. ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java (+83, -0)
  12. ambari-server/src/test/python/TestAmbariServer.py (+1, -1)
  13. ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py (+13, -0)
  14. ambari-server/src/test/python/stacks/1.3.2/configs/default_update_exclude_file_only.json (+362, -0)
  15. ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py (+14, -0)
  16. ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json (+423, -0)

+ 44 - 17
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java

@@ -103,7 +103,7 @@ public class AmbariCustomCommandExecutionHelper {
   private final static Logger LOG =
       LoggerFactory.getLogger(AmbariCustomCommandExecutionHelper.class);
   // TODO: Remove the hard-coded mapping when stack definition indicates which slave types can be decommissioned
-  private static final Map<String, String> masterToSlaveMappingForDecom = new HashMap<String, String>();
+  public static final Map<String, String> masterToSlaveMappingForDecom = new HashMap<String, String>();
 
   static {
     masterToSlaveMappingForDecom.put("NAMENODE", "DATANODE");
@@ -112,11 +112,11 @@ public class AmbariCustomCommandExecutionHelper {
     masterToSlaveMappingForDecom.put("JOBTRACKER", "TASKTRACKER");
   }
 
-  private static String DECOM_INCLUDED_HOSTS = "included_hosts";
-  private static String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
-  private static String DECOM_SLAVE_COMPONENT = "slave_type";
-  private static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
-  private static String UPDATE_EXCLUDE_FILE_ONLY = "update_exclude_file_only";
+  public static String DECOM_INCLUDED_HOSTS = "included_hosts";
+  public static String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
+  public static String DECOM_SLAVE_COMPONENT = "slave_type";
+  public static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
+  public static String UPDATE_EXCLUDE_FILE_ONLY = "update_exclude_file_only";
   private static String ALIGN_MAINTENANCE_STATE = "align_maintenance_state";
   @Inject
   private ActionMetadata actionMetadata;
@@ -138,7 +138,7 @@ public class AmbariCustomCommandExecutionHelper {
   private OsFamily os_family;
 
   protected static final String SERVICE_CHECK_COMMAND_NAME = "SERVICE_CHECK";
-  protected static final String DECOMMISSION_COMMAND_NAME = "DECOMMISSION";
+  public static final String DECOMMISSION_COMMAND_NAME = "DECOMMISSION";
 
 
   private Boolean isServiceCheckCommand(String command, String service) {
@@ -648,16 +648,43 @@ public class AmbariCustomCommandExecutionHelper {
     }
 
     // Filtering hosts based on Maintenance State
-    MaintenanceStateHelper.HostPredicate hostPredicate =
-      new MaintenanceStateHelper.HostPredicate() {
-        @Override
-        public boolean shouldHostBeRemoved(final String hostname)
-                throws AmbariException {
-          return ! maintenanceStateHelper.isOperationAllowed(
-                  cluster, actionExecutionContext.getOperationLevel(),
-                  resourceFilter, serviceName, slaveCompType, hostname);
-        }
-    };
+    MaintenanceStateHelper.HostPredicate hostPredicate
+            = new MaintenanceStateHelper.HostPredicate() {
+              @Override
+              public boolean shouldHostBeRemoved(final String hostname)
+              throws AmbariException {
+                // Get the UPDATE_EXCLUDE_FILE_ONLY parameter as a string
+                String upd_excl_file_only_str = actionExecutionContext.getParameters()
+                .get(UPDATE_EXCLUDE_FILE_ONLY);
+
+                String decom_incl_hosts_str = actionExecutionContext.getParameters()
+                .get(DECOM_INCLUDED_HOSTS);
+                if ((upd_excl_file_only_str != null &&
+                        !upd_excl_file_only_str.trim().equals(""))){
+                  upd_excl_file_only_str = upd_excl_file_only_str.trim();
+                }
+
+                boolean upd_excl_file_only = false;
+                // Parse the possible forms of the value
+                if (upd_excl_file_only_str != null &&
+                        !upd_excl_file_only_str.equals("") &&
+                        (upd_excl_file_only_str.equals("\"true\"")
+                        || upd_excl_file_only_str.equals("'true'")
+                        || upd_excl_file_only_str.equals("true"))){
+                  upd_excl_file_only = true;
+                }
+
+                // If we are only clearing *.exclude and the component has already been removed, skip the check
+                if (upd_excl_file_only && decom_incl_hosts_str != null
+                        && !decom_incl_hosts_str.trim().equals("")) {
+                  return upd_excl_file_only;
+                } else {
+                  return !maintenanceStateHelper.isOperationAllowed(
+                          cluster, actionExecutionContext.getOperationLevel(),
+                          resourceFilter, serviceName, slaveCompType, hostname);
+                }
+              }
+            };
     // Filter excluded hosts
     Set<String> filteredExcludedHosts = new HashSet<String>(excludedHosts);
     Set<String> ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(
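The rewritten predicate short-circuits the maintenance-state check: when the command is an exclude-file-only update and included_hosts is non-empty, the host is kept even though its component has already been deleted. Because the parameter travels as a string that can arrive bare or quoted depending on how the request was built, three spellings are accepted; a minimal Python sketch of the same normalization (illustration only, not part of the commit):

```python
def is_update_exclude_file_only(command_params):
    """Sketch of the value forms the new HostPredicate accepts for
    update_exclude_file_only; the raw string may arrive bare, or wrapped
    in single or double quotes."""
    raw = (command_params.get("update_exclude_file_only") or "").strip()
    return raw in ("true", "'true'", '"true"')

# e.g. is_update_exclude_file_only({"update_exclude_file_only": "'true'"}) -> True
```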

+ 54 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -144,6 +144,8 @@ import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.HostComponentAdminState;
 
 @Singleton
 public class AmbariManagementControllerImpl implements AmbariManagementController {
@@ -2584,7 +2586,59 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry
             : safeToRemoveSCHs.entrySet()) {
       for (ServiceComponentHost componentHost : entry.getValue()) {
+        String included_hostname = componentHost.getHostName();
+        String serviceName = entry.getKey().getServiceName();
+        String master_component_name = null;
+        String slave_component_name = componentHost.getServiceComponentName();
+        HostComponentAdminState desiredAdminState = componentHost.getComponentAdminState();
+        State slaveState = componentHost.getState();
+        // Delete host components
         entry.getKey().deleteServiceComponentHosts(componentHost.getHostName());
+        // If the deleted host component supports decommission and was decommissioned and stopped
+        if (AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.containsValue(slave_component_name)
+                && desiredAdminState.equals(HostComponentAdminState.DECOMMISSIONED)
+                && slaveState.equals(State.INSTALLED)) {
+
+          for (Entry<String, String> entrySet : AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.entrySet()) {
+            if (entrySet.getValue().equals(slave_component_name)) {
+              master_component_name = entrySet.getKey();
+            }
+          }
+          // Clear the exclude file (or the draining list), except for HBASE
+          if (!serviceName.equals(Service.Type.HBASE.toString())) {
+            HashMap<String, String> requestProperties = new HashMap<String, String>();
+            requestProperties.put("context", "Remove host " +
+                    included_hostname + " from exclude file");
+            requestProperties.put("exclusive", "true");
+            HashMap<String, String> params = new HashMap<String, String>();
+            params.put("included_hosts", included_hostname);
+            params.put("slave_type", slave_component_name);
+            params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
+
+            // Create the resource filter for the RECOMMISSION command
+            RequestResourceFilter resourceFilter
+                    = new RequestResourceFilter(serviceName, master_component_name, null);
+            // Create the request for the RECOMMISSION command
+            ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+                    entry.getKey().getClusterName(), AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
+                    Collections.singletonList(resourceFilter), null, params, true);
+            // Send the request
+            createAction(actionRequest, requestProperties);
+          }
+
+          // Mark the master component as requiring a restart, so the removed host is dropped from the components UI
+          Cluster cluster = clusters.getCluster(entry.getKey().getClusterName());
+          Service service = cluster.getService(serviceName);
+          ServiceComponent sc = service.getServiceComponent(master_component_name);
+
+          if (sc != null && sc.isMasterComponent()) {
+            for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
+              sch.setRestartRequired(true);
+            }
+          }
+
+        }
+
       }
     }
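The cleanup amounts to re-issuing a DECOMMISSION custom command with included_hosts naming the deleted host and update_exclude_file_only set, so the NameNode (or JobTracker) only rewrites its exclude file rather than running a full decommission. A rough client-side equivalent through Ambari's request API (a sketch; cluster name, host, and credentials below are hypothetical):

```python
import requests

# Sketch of the request the controller assembles internally via
# ExecuteActionRequest/createAction; all concrete values are hypothetical.
payload = {
    "RequestInfo": {
        "command": "DECOMMISSION",
        "context": "Remove host h1 from exclude file",
        "parameters": {
            "slave_type": "DATANODE",
            "included_hosts": "h1",
            "update_exclude_file_only": "true",
        },
    },
    "Requests/resource_filters": [
        {"service_name": "HDFS", "component_name": "NAMENODE"},
    ],
}
requests.post(
    "http://ambari.example.com:8080/api/v1/clusters/c1/requests",
    json=payload,
    auth=("admin", "admin"),
    headers={"X-Requested-By": "ambari"},  # Ambari rejects requests without this header
)
```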
 
 

+ 15 - 14
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py

@@ -143,18 +143,19 @@ def decommission():
        group=user_group
   )
 
-  Execute(nn_kinit_cmd,
-          user=hdfs_user
-  )
+  if not params.update_exclude_file_only:
+    Execute(nn_kinit_cmd,
+            user=hdfs_user
+    )
 
-  if params.dfs_ha_enabled:
-    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-    # need to execute each command scoped to a particular namenode
-    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-  else:
-    nn_refresh_cmd = format('dfsadmin -refreshNodes')
-  ExecuteHadoop(nn_refresh_cmd,
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True,
-                bin_dir=params.hadoop_bin_dir)
+    if params.dfs_ha_enabled:
+      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+      # need to execute each command scoped to a particular namenode
+      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+    else:
+      nn_refresh_cmd = format('dfsadmin -refreshNodes')
+    ExecuteHadoop(nn_refresh_cmd,
+                  user=hdfs_user,
+                  conf_dir=conf_dir,
+                  kinit_override=True,
+                  bin_dir=params.hadoop_bin_dir)
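With this gating (mirrored for HDP 2.0.6 below), an exclude-file-only command still regenerates dfs.exclude, which happens just above this hunk, but skips both the kinit and the dfsadmin -refreshNodes call that would be pointless for an already-deleted DataNode. A condensed, runnable Python sketch of the resulting control flow (the callables stand in for the resource_management actions and are hypothetical):

```python
def decommission_flow(params, write_exclude_file, kinit, refresh_nodes):
    """Condensed illustration of the reshaped decommission() above."""
    write_exclude_file()          # always: re-render dfs.exclude
    if not params.get("update_exclude_file_only", False):
        kinit()                   # authenticate only for a real decommission
        refresh_nodes()           # dfsadmin -refreshNodes (HA-aware in the script)
```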

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py

@@ -55,7 +55,7 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
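This one-line params.py change (repeated for the YARN, MAPREDUCE, and HDP variants below) is what keeps every other command working: update_exclude_file_only is only present in the command JSON for the new exclude-file-only requests, so the old direct dict lookup raised KeyError for ordinary commands, while default() degrades to False. A sketch of the assumed default() semantics (the real helper comes from resource_management; this reimplementation is for illustration only):

```python
config = {"commandParams": {}}  # stand-in for the parsed command JSON

def default(path, fallback):
    """Walk a '/'-separated path through the nested command JSON,
    returning fallback instead of raising when any key is missing."""
    node = config
    for key in filter(None, path.split("/")):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

# The old form, config['commandParams']['update_exclude_file_only'],
# raised KeyError here; the new form degrades to False:
update_exclude_file_only = default("/commandParams/update_exclude_file_only", False)
```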

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py

@@ -164,7 +164,7 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -35,7 +35,7 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py

@@ -40,7 +40,7 @@ user_group = config['configurations']['cluster-env']['user_group']
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 mapred_log_dir_prefix = hdfs_log_dir_prefix
 mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 hadoop_jar_location = "/usr/lib/hadoop/"
 smokeuser = config['configurations']['cluster-env']['smokeuser']

+ 15 - 14
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py

@@ -149,18 +149,19 @@ def decommission():
        group=user_group
   )
 
-  Execute(nn_kinit_cmd,
-          user=hdfs_user
-  )
+  if not params.update_exclude_file_only:
+    Execute(nn_kinit_cmd,
+            user=hdfs_user
+    )
 
-  if params.dfs_ha_enabled:
-    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-    # need to execute each command scoped to a particular namenode
-    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-  else:
-    nn_refresh_cmd = format('dfsadmin -refreshNodes')
-  ExecuteHadoop(nn_refresh_cmd,
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True,
-                bin_dir=params.hadoop_bin_dir)
+    if params.dfs_ha_enabled:
+      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+      # need to execute each command scoped to a particular namenode
+      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+    else:
+      nn_refresh_cmd = format('dfsadmin -refreshNodes')
+    ExecuteHadoop(nn_refresh_cmd,
+                  user=hdfs_user,
+                  conf_dir=conf_dir,
+                  kinit_override=True,
+                  bin_dir=params.hadoop_bin_dir)

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -89,7 +89,7 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py

@@ -166,7 +166,7 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 

+ 83 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -8302,6 +8302,89 @@ public class AmbariManagementControllerTest {
     controller.deleteHostComponents(schRequests);
   }
 
+  @Test
+  public void testDeleteHostComponentInDecomissionState() throws Exception {
+    String clusterName = "foo1";
+    createCluster(clusterName);
+    clusters.getCluster(clusterName)
+        .setDesiredStackVersion(new StackId("HDP-1.3.1"));
+    String serviceName = "HDFS";
+    String mapred = "MAPREDUCE";
+    createService(clusterName, serviceName, null);
+    createService(clusterName, mapred, null);
+    String componentName1 = "NAMENODE";
+    String componentName2 = "DATANODE";
+    String componentName3 = "HDFS_CLIENT";
+    String componentName4 = "JOBTRACKER";
+    String componentName5 = "TASKTRACKER";
+    String componentName6 = "MAPREDUCE_CLIENT";
+
+    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
+    createServiceComponent(clusterName, mapred, componentName4, State.INIT);
+    createServiceComponent(clusterName, mapred, componentName5, State.INIT);
+    createServiceComponent(clusterName, mapred, componentName6, State.INIT);
+
+    String host1 = "h1";
+
+    addHost(host1, clusterName);
+
+    createServiceComponentHost(clusterName, serviceName, componentName1, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
+    createServiceComponentHost(clusterName, mapred, componentName4, host1, null);
+    createServiceComponentHost(clusterName, mapred, componentName5, host1, null);
+    createServiceComponentHost(clusterName, mapred, componentName6, host1, null);
+
+    // Install
+    installService(clusterName, serviceName, false, false);
+    installService(clusterName, mapred, false, false);
+
+    Cluster cluster = clusters.getCluster(clusterName);
+    Service s1 = cluster.getService(serviceName);
+    Service s2 = cluster.getService(mapred);
+    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
+    sc1.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+
+    Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
+    // delete HC
+    schRequests.clear();
+    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null));
+    try {
+      controller.deleteHostComponents(schRequests);
+      Assert.fail("Expect failure while deleting.");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex.getMessage().contains(
+          "Host Component cannot be removed"));
+    }
+
+    sc1.getServiceComponentHosts().values().iterator().next().setDesiredState(State.STARTED);
+    sc1.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
+    sc2.getServiceComponentHosts().values().iterator().next().setState(State.INSTALLED);
+    sc2.getServiceComponentHosts().values().iterator().next().setMaintenanceState(MaintenanceState.ON);
+    sc2.getServiceComponentHosts().values().iterator().next().setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
+    ServiceComponent sc3 = s1.getServiceComponent(componentName3);
+    sc3.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+    ServiceComponent sc4 = s2.getServiceComponent(componentName4);
+    sc4.getServiceComponentHosts().values().iterator().next().setDesiredState(State.STARTED);
+    sc4.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+    ServiceComponent sc5 = s2.getServiceComponent(componentName5);
+    sc5.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+    ServiceComponent sc6 = s2.getServiceComponent(componentName6);
+    sc6.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);
+
+    schRequests.clear();
+    //schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null));
+    ServiceComponentHostRequest schr2 = new ServiceComponentHostRequest(clusterName, serviceName, componentName2, host1, null);
+    schr2.setAdminState("DECOMMISSIONED");
+    schRequests.add(schr2);
+    controller.deleteHostComponents(schRequests);
+    ServiceComponentHost sch = sc1.getServiceComponentHost(host1);
+    assertTrue(sch.isRestartRequired());
+  }
+
   @Test
   public void testDeleteHost() throws Exception {
     String clusterName = "foo1";

+ 1 - 1
ambari-server/src/test/python/TestAmbariServer.py

@@ -2133,7 +2133,7 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     pg_status, retcode, out, err = ambari_server.check_postgre_up()
     self.assertEqual(0, retcode)
 
-    ambari_server.OS = 'suse'
+    ambari_server.OS_TYPE = OSConst.OS_SUSE
     p.poll.return_value = 4
     get_postgre_status_mock.return_value = "stopped", 0, "", ""
     pg_status, retcode, out, err = ambari_server.check_postgre_up()

+ 13 - 0
ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py

@@ -230,6 +230,19 @@ class TestNamenode(RMFTestCase):
        user = 'hdfs',
    )
    self.assertNoMoreResources()
+
+  def test_decommission_update_exclude_file_only(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "decommission",
+                       config_file="default_update_exclude_file_only.json"
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+        owner = 'hdfs',
+        content = Template('exclude_hosts_list.j2'),
+        group = 'hadoop',
+    )
+    self.assertNoMoreResources()
 
   def test_decommission_secured(self):
     self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",

File diff suppressed because it is too large to display
+ 362 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/default_update_exclude_file_only.json
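The fixture itself is suppressed in this view, but for the new test to reach the exclude-file-only branch it presumably differs from the stock config only in its command parameters; a hypothetical fragment (shape assumed, not copied from the 362-line file):

```python
# Hypothetical fragment of default_update_exclude_file_only.json,
# expressed as a Python dict; the real fixture is not shown in this view.
fixture_fragment = {
    "commandParams": {
        "update_exclude_file_only": "true",
    },
    "clusterHostInfo": {
        "decom_dn_hosts": ["host1"],
    },
}
```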


+ 14 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -413,6 +413,20 @@ class TestNamenode(RMFTestCase):
                              kinit_override = True)
     self.assertNoMoreResources()
 
+  def test_decommission_update_exclude_file_only(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "decommission",
+                       config_file="default_update_exclude_file_only.json"
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertNoMoreResources()
+
+
   def test_decommission_ha_default(self):
     self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
                        classname = "NameNode",

File diff suppressed because it is too large to display
+ 423 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json


Some files were not shown because too many files changed in this diff