
AMBARI-2019. Cannot decommission data node (ensure recommission also works). (swagle)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1471728 13f79535-47bb-0310-9956-ffa450edef68
Siddharth Wagle, 12 years ago
commit
9736dcbbce

+ 3 - 0
CHANGES.txt

@@ -786,6 +786,9 @@ Trunk (unreleased changes):
 
  BUG FIXES
 
+ AMBARI-2019. Cannot decommission data node (ensure recommission also works).
+ (swagle)
+ 
  AMBARI-2021. Hadoop installation on cluster with SUSE-11 failed. (smohanty)
 
  AMBARI-2010. Tasks do not timeout for failed hosts. (swagle)

+ 1 - 4
ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb

@@ -29,11 +29,8 @@ module Puppet::Parser::Functions
     # Lookup value inside a hash map.
     if var_parts.length > 1 and function_hdp_is_empty(val) and function_hdp_is_empty(lookupvar("configuration")) == false and function_hdp_is_empty(lookupvar("#{var_parts[-2]}")) == false
       keyHash = var_parts[-2]
-      puts "keyHash  #{keyHash}"      
       hashMap = lookupvar("#{keyHash}") 
-      puts "hashMap #{hashMap}"
-      puts "default #{default}"   
-      val = hashMap.fetch(var_name, default.to_s)      
+      val = hashMap.fetch(var_name, default.to_s)
     end
     # To workaround string-boolean comparison issues,
     # ensure that we return boolean result if the default value
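The hdp_default.rb change only removes the debug puts statements; the lookup itself still falls back to the caller-supplied default via Hash#fetch. As a rough analogy in Java (the language of the rest of this commit), the retained line behaves like the hypothetical sketch below; this illustrates the fallback semantics only and is not the Puppet function itself.

    import java.util.HashMap;
    import java.util.Map;

    // Rough Java analogue of: val = hashMap.fetch(var_name, default.to_s)
    // Returns the configured value if the key exists, otherwise the
    // stringified default. Class name and values are illustrative only.
    class HdpDefaultLookupSketch {
        static String lookup(Map<String, String> configHash,
                             String varName, Object defaultValue) {
            return configHash.containsKey(varName)
                ? configHash.get(varName) : String.valueOf(defaultValue);
        }

        public static void main(String[] args) {
            Map<String, String> hdfsSite = new HashMap<String, String>();
            hdfsSite.put("dfs.replication", "3");
            System.out.println(lookup(hdfsSite, "dfs.replication", 1)); // "3"
            System.out.println(lookup(hdfsSite, "dfs.block.size", 64)); // "64"
        }
    }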

+ 7 - 9
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -4028,6 +4028,7 @@ public class AmbariManagementControllerImpl implements
       throws AmbariException {
     // Find hdfs admin host, just decommission from namenode.
     String clusterName = decommissionRequest.getClusterName();
+    Cluster cluster = clusters.getCluster(clusterName);
     String serviceName = decommissionRequest.getServiceName();
     String namenodeHost = clusters.getCluster(clusterName)
         .getService(serviceName).getServiceComponent(Role.NAMENODE.toString())
@@ -4052,14 +4053,11 @@ public class AmbariManagementControllerImpl implements
         new TreeMap<String, Map<String, String>>();
     configurations.put(config.getType(), config.getProperties());
 
-    Map<String, Config> hdfsSiteConfig = clusters.getCluster(clusterName).getService("HDFS")
-        .getDesiredConfigs();
-    if (hdfsSiteConfig != null) {
-      for (Map.Entry<String, Config> entry: hdfsSiteConfig.entrySet()) {
-        configurations
-          .put(entry.getValue().getType(), entry.getValue().getProperties());
-      }
-    }
+    Map<String, Map<String, String>> configTags = new TreeMap<String,
+      Map<String, String>>();
+
+    findConfigurationPropertiesWithOverrides(configurations, configTags,
+      cluster, serviceName, namenodeHost);
 
     stage.addHostRoleExecutionCommand(
         namenodeHost,
@@ -4073,8 +4071,8 @@ public class AmbariManagementControllerImpl implements
       Role.DECOMMISSION_DATANODE.toString()).getExecutionCommand();
 
     execCmd.setConfigurations(configurations);
+    execCmd.setConfigurationTags(configTags);
 
-    Cluster cluster = clusters.getCluster(clusterName);
     Map<String, String> params = new TreeMap<String, String>();
     params.put("jdk_location", this.jdkResourceUrl);
     params.put("stack_version", cluster.getDesiredStackVersion()

+ 98 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -4609,6 +4609,104 @@ public class AmbariManagementControllerTest {
         taskStatuses.get(0).getRole());
   }
 
+  @Test
+  public void testDecommissonDatanodeAction() throws AmbariException {
+    String clusterName = "foo1";
+    createCluster(clusterName);
+    clusters.getCluster(clusterName)
+      .setDesiredStackVersion(new StackId("HDP-0.1"));
+    String serviceName = "HDFS";
+    createService(clusterName, serviceName, null);
+    String componentName1 = "NAMENODE";
+    String componentName2 = "DATANODE";
+    String componentName3 = "HDFS_CLIENT";
+
+    Map<String, String> mapRequestProps = new HashMap<String, String>();
+    mapRequestProps.put("context", "Called from a test");
+
+    createServiceComponent(clusterName, serviceName, componentName1,
+      State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName2,
+      State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName3,
+      State.INIT);
+
+    String host1 = "h1";
+    clusters.addHost(host1);
+    clusters.getHost("h1").setOsType("centos5");
+    clusters.getHost("h1").persist();
+    String host2 = "h2";
+    clusters.addHost(host2);
+    clusters.getHost("h2").setOsType("centos6");
+    clusters.getHost("h2").persist();
+
+    clusters.mapHostToCluster(host1, clusterName);
+    clusters.mapHostToCluster(host2, clusterName);
+
+    createServiceComponentHost(clusterName, serviceName, componentName1,
+      host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2,
+      host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2,
+      host2, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3,
+      host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3,
+      host2, null);
+
+    // Install
+    installService(clusterName, serviceName, false, false);
+
+    // Create and attach config
+    Map<String, String> configs = new HashMap<String, String>();
+    configs.put("a", "b");
+
+    ConfigurationRequest cr1,cr2;
+    cr1 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
+      configs);
+    ClusterRequest crReq = new ClusterRequest(null, clusterName, null, null);
+    crReq.setDesiredConfig(cr1);
+    controller.updateCluster(crReq, null);
+    Map<String, String> props = new HashMap<String, String>();
+    props.put("datanodes", host2);
+    cr2 = new ConfigurationRequest(clusterName, "hdfs-exclude-file", "tag1",
+      props);
+    crReq = new ClusterRequest(null, clusterName, null, null);
+    crReq.setDesiredConfig(cr2);
+    controller.updateCluster(crReq, null);
+
+    // Start
+    startService(clusterName, serviceName, false, false);
+
+    Cluster cluster = clusters.getCluster(clusterName);
+    Service s = cluster.getService(serviceName);
+    Assert.assertEquals(State.STARTED, s.getDesiredState());
+
+    Set<ActionRequest> requests = new HashSet<ActionRequest>();
+    Map<String, String> params = new HashMap<String, String>(){{
+      put("test", "test");
+    }};
+    ActionRequest request = new ActionRequest(clusterName, "HDFS",
+      Role.DECOMMISSION_DATANODE.name(), params);
+    params.put("excludeFileTag", "tag1");
+    requests.add(request);
+
+    Map<String, String> requestProperties = new HashMap<String, String>();
+    requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
+
+    RequestStatusResponse response = controller.createActions(requests,
+      requestProperties);
+
+    List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
+    ExecutionCommand execCmd = storedTasks.get(0).getExecutionCommandWrapper
+      ().getExecutionCommand();
+    assertNotNull(storedTasks);
+    Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-site"));
+    Assert.assertEquals(1, storedTasks.size());
+    Assert.assertEquals(host2, execCmd.getConfigurations().get
+      ("hdfs-exclude-file").get("datanodes"));
+  }
+
   @Test
   public void testConfigsAttachedToServiceChecks() throws AmbariException {
     String clusterName = "foo1";
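The commit title also calls out recommission. The new test above covers the decommission direction; a recommission would presumably reuse the same request types, updating the hdfs-exclude-file config so no hosts remain excluded and re-issuing the DECOMMISSION_DATANODE action so the NameNode refreshes its exclude list. A hedged sketch of that client-side sequence, mirroring the calls in the test (the empty "datanodes" value, the "tag2" name, and the helper method itself are assumptions, not part of the patch):

    // Hypothetical recommission sketch reusing the same controller API the
    // test exercises. Assumes it lives in the test class ("controller" and
    // the java.util / Ambari imports come from there, plus java.util.Collections).
    private void recommissionAllDatanodes(String clusterName,
            Map<String, String> requestProperties) throws AmbariException {
        // 1. Attach a new hdfs-exclude-file config with no excluded hosts.
        Map<String, String> props = new HashMap<String, String>();
        props.put("datanodes", "");                       // assumption: empty list
        ConfigurationRequest cr = new ConfigurationRequest(clusterName,
            "hdfs-exclude-file", "tag2", props);
        ClusterRequest crReq = new ClusterRequest(null, clusterName, null, null);
        crReq.setDesiredConfig(cr);
        controller.updateCluster(crReq, null);

        // 2. Re-run the DECOMMISSION_DATANODE action pointing at the new tag.
        Map<String, String> params = new HashMap<String, String>();
        params.put("excludeFileTag", "tag2");
        ActionRequest request = new ActionRequest(clusterName, "HDFS",
            Role.DECOMMISSION_DATANODE.name(), params);
        controller.createActions(Collections.singleton(request),
            requestProperties);
    }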