
AMBARI-11086 - Upgrade Pack Configure Task Must Preserve Additions When Deleting (jonathanhurley)

Jonathan Hurley 10 years ago
parent
commit
7e4cba5cfb
52 changed files with 641 additions and 153 deletions
  1. +86 -13  ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
  2. +8 -2  ambari-common/src/main/python/resource_management/libraries/functions/format.py
  3. +53 -0  ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
  4. +1 -1  ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java
  5. +122 -20  ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
  6. +1 -5  ambari-server/src/main/java/org/apache/ambari/server/state/ConfigMergeHelper.java
  7. +2 -1  ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
  8. +2 -1  ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
  9. +4 -3  ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
  10. +3 -2  ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
  11. +2 -1  ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
  12. +2 -1  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
  13. +2 -1  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
  14. +2 -1  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
  15. +2 -1  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
  16. +2 -1  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
  17. +2 -1  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
  18. +2 -1  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
  19. +2 -1  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
  20. +2 -1  ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/upgrade.py
  21. +2 -1  ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
  22. +2 -1  ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
  23. +2 -1  ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
  24. +2 -1  ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
  25. +2 -1  ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
  26. +5 -4  ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
  27. +3 -1  ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
  28. +2 -1  ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
  29. +3 -1  ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
  30. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py
  31. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py
  32. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py
  33. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py
  34. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py
  35. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py
  36. +2 -1  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py
  37. +2 -1  ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
  38. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
  39. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
  40. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
  41. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
  42. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
  43. +2 -1  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
  44. +2 -1  ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
  45. +2 -1  ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
  46. +2 -1  ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
  47. +1 -1  ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
  48. +180 -63  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
  49. +35 -0  ambari-server/src/test/python/TestUtils.py
  50. +3 -1  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  51. +1 -1  ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
  52. +62 -0  ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml

+ 86 - 13
ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py

@@ -23,8 +23,47 @@ __all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir"]
 import version
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_hdp_version import get_hdp_version
 from resource_management.libraries.script.script import Script
 
+# a mapping of Ambari server role to hdp-select component name for all
+# non-clients
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ACCUMULO_MASTER' : 'accumulo-master',
+  'ACCUMULO_MONITOR' : 'accumulo-monitor',
+  'ACCUMULO_GC' : 'accumulo-gc',
+  'ACCUMULO_TRACER' : 'accumulo-tracer',
+  'ACCUMULO_TSERVER' : 'accumulo-tablet',
+  'ATLAS_SERVER' : 'atlas-server',
+  'FLUME_HANDLER' : 'flume-server',
+  'FALCON_SERVER' : 'falcon-server',
+  'NAMENODE' : 'hadoop-hdfs-namenode',
+  'DATANODE' : 'hadoop-hdfs-datanode',
+  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
+  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
+  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'KAFKA_BROKER' : 'kafka-broker',
+  'KNOX_GATEWAY' : 'knox-server',
+  'OOZIE_SERVER' : 'oozie-server',
+  'RANGER_ADMIN' : 'ranger-admin',
+  'RANGER_USERSYNC' : 'ranger-usersync',
+  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'ZOOKEEPER_SERVER' : 'zookeeper-server'
+}
+
 TEMPLATE = "conf-select {0} --package {1} --stack-version {2} --conf-version 0"
 HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
 HADOOP_DIR_DEFAULTS = {
@@ -43,14 +82,19 @@ def _valid(stack_name, package, ver):
 
   return True
 
-def _is_upgrade():
+def _get_upgrade_stack():
+  """
+  Gets the stack name and stack version if an upgrade is currently in progress.
+  :return:  the stack name and stack version as a tuple, or None if an
+  upgrade is not in progress.
+  """
   from resource_management.libraries.functions.default import default
   direction = default("/commandParams/upgrade_direction", None)
   stack_name = default("/hostLevelParams/stack_name", None)
-  ver = default("/commandParams/version", None)
+  stack_version = default("/commandParams/version", None)
 
-  if direction and stack_name and ver:
-    return (stack_name, ver)
+  if direction and stack_name and stack_version:
+    return (stack_name, stack_version)
 
   return None
 
@@ -94,17 +138,19 @@ def get_hadoop_conf_dir():
       by conf-select.  This is in the form /usr/hdp/VERSION/hadoop/conf to make sure
       the configs are written in the correct place
   """
-
   hadoop_conf_dir = "/etc/hadoop/conf"
 
   if Script.is_hdp_stack_greater_or_equal("2.2"):
     hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 
-    res = _is_upgrade()
+    stack_info = _get_upgrade_stack()
+
+    if stack_info is not None and Script.is_hdp_stack_greater_or_equal("2.3"):
+      stack_name = stack_info[0]
+      stack_version = stack_info[1]
 
-    if res is not None and Script.is_hdp_stack_greater_or_equal("2.3"):
-      select(res[0], "hadoop", res[1])
-      hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(res[1])
+      select(stack_name, "hadoop", stack_version)
+      hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(stack_version)
 
   return hadoop_conf_dir
 
@@ -113,7 +159,9 @@ def get_hadoop_dir(target):
   Return the hadoop shared directory in the following override order
   1. Use default for 2.1 and lower
   2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
-  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}
+  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
+  However, if the upgrade has not yet invoked hdp-select, return the current
+  version of the component.
   :target: the target directory
   """
 
@@ -125,10 +173,35 @@ def get_hadoop_dir(target):
   if Script.is_hdp_stack_greater_or_equal("2.2"):
     hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
 
-    res = _is_upgrade()
+    stack_info = _get_upgrade_stack()
+
+    if stack_info is not None:
+      stack_version = stack_info[1]
+
+      # determine if hdp-select has been run and if not, then use the current
+      # hdp version until this component is upgraded
+      current_hdp_version = get_role_component_current_hdp_version()
+      if current_hdp_version is not None and stack_version != current_hdp_version:
+        stack_version = current_hdp_version
 
-    if res is not None:
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format(res[1], "hadoop", target)
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
 
   return hadoop_dir
+
+
+def get_role_component_current_hdp_version():
+  """
+  Gets the current HDP version of the component that this role command is for.
+  :return:  the current HDP version of the specified component or None
+  """
+  command_role = default("/role", "")
+  if command_role in SERVER_ROLE_DIRECTORY_MAP:
+    hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[command_role]
+    current_hdp_version = get_hdp_version(hdp_select_component)
+
+    Logger.info("{0} is currently at version {1}".format(
+      hdp_select_component, current_hdp_version))
+
+    return current_hdp_version
+
+  return None
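
A minimal sketch of the version-resolution order get_hadoop_dir now follows during an upgrade. The helper below is an illustrative stand-in with hard-coded inputs, not the Ambari API:

# Sketch only: mirrors get_hadoop_dir's logic above, with plain arguments in
# place of Ambari's command JSON and the real hdp-select query.
HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"

def resolve_hadoop_dir(target, upgrade_version, current_hdp_version):
    # HDP 2.2+ default: the /usr/hdp/current symlink
    hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
    if upgrade_version is not None:
        stack_version = upgrade_version
        # if hdp-select has not yet run for this component, keep using the
        # version it currently reports until the component itself is upgraded
        if current_hdp_version is not None and stack_version != current_hdp_version:
            stack_version = current_hdp_version
        hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
    return hadoop_dir

# mid-upgrade, before hdp-select has run for this role:
print(resolve_hadoop_dir("bin", "2.3.0.0-1234", "2.2.0.0-2041"))
# -> /usr/hdp/2.2.0.0-2041/hadoop/bin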

+ 8 - 2
ambari-common/src/main/python/resource_management/libraries/functions/format.py

@@ -41,8 +41,14 @@ class ConfigurationFormatter(Formatter):
     env = Environment.get_instance()
     variables = kwargs
     params = env.config.params
-    all_params = checked_unite(variables, params)
-    
+
+    # don't use checked_unite for this as it would interfere with reload(module)
+    # for things like params and status_params; instead, start out copying
+    # the environment parameters and add in any locally declared variables to
+    # override existing env parameters
+    all_params = params.copy()
+    all_params.update(variables)
+
     self.convert_field = self.convert_field_protected
     result_protected = self.vformat(format_string, args, all_params)
     
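
The checked_unite replacement is plain dict semantics: copy the environment parameters first, then let locally declared variables override. A self-contained illustration with made-up values:

params = {"oozie_home": "/usr/hdp/current/oozie-server", "version": "2.2.0.0-2041"}
variables = {"version": "2.3.0.0-1234"}   # locally declared, must win

all_params = params.copy()    # start from the environment parameters
all_params.update(variables)  # local declarations override

print(all_params["version"])     # 2.3.0.0-1234
print(all_params["oozie_home"])  # /usr/hdp/current/oozie-server

Unlike checked_unite, this never raises on conflicting duplicate keys, which appears to be what the comment above is guarding against for reloaded param modules.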

+ 53 - 0
ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py

@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+
+# hdp-select set oozie-server 2.2.0.0-1234
+TEMPLATE = "hdp-select set {0} {1}"
+
+def select(component, version):
+  """
+  Executes hdp-select on the specific component and version. Some global
+  variables that are imported via params/status_params/params_linux will need
+to be recalculated after the hdp-select. However, Python does not re-import
+  existing modules. The only way to ensure that the configuration variables are
+  recalculated is to call reload(...) on each module that has global parameters.
+  After invoking hdp-select, this function will also reload params, status_params,
+  and params_linux.
+  :param component: the hdp-select component, such as oozie-server
+  :param version: the version to set the component to, such as 2.2.0.0-1234
+  """
+  command = TEMPLATE.format(component, version)
+  Execute(command)
+
+  # don't trust the ordering of modules:
+  # 1) status_params
+  # 2) params_linux
+  # 3) params
+  modules = sys.modules
+  param_modules = "status_params", "params_linux", "params"
+  for moduleName in param_modules:
+    if moduleName in modules:
+      module = modules.get(moduleName)
+      reload(module)
+      Logger.info("After hdp-select {0}, reloaded module {1}".format(component, moduleName))
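
Call sites then reduce to one line per component. A usage sketch, assuming an Ambari agent environment where resource_management is importable:

from resource_management.libraries.functions import hdp_select

# runs "hdp-select set oozie-server 2.2.0.0-1234", then reloads
# status_params, params_linux and params so module-level globals pick
# up the new /usr/hdp/current symlinks
hdp_select.select("oozie-server", "2.2.0.0-1234")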

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/checks/ClientRetryPropertyCheck.java

@@ -101,7 +101,7 @@ public class ClientRetryPropertyCheck extends AbstractCheckDescriptor {
     }
 
     if (services.containsKey("OOZIE")) {
-      String oozieClientRetry = getProperty(request, "oozie-env", "template");
+      String oozieClientRetry = getProperty(request, "oozie-env", "content");
       if (null == oozieClientRetry || !oozieClientRetry.contains("-Doozie.connection.retry.count")) {
         errorMessages.add(getFailReason(OOZIE_CLIENT_RETRY_MISSING_KEY, prerequisiteCheck, request));
         prerequisiteCheck.getFailedOn().add("OOZIE");
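
The corrected check reads the oozie-env "content" template rather than the nonexistent "template" key; its predicate reduces to a substring test, sketched here in Python with invented input:

def oozie_client_retry_configured(oozie_env_content):
    # mirrors the Java check: fail when the property is missing or the
    # retry flag is absent from the env template
    return (oozie_env_content is not None
            and "-Doozie.connection.retry.count" in oozie_env_content)

print(oozie_client_retry_configured(
    'export OOZIE_CLIENT_OPTS="${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5"'))  # True
print(oozie_client_retry_configured(None))  # False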

+ 122 - 20
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java

@@ -21,13 +21,16 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.ConfigurationRequest;
@@ -40,6 +43,7 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigMergeHelper;
 import org.apache.ambari.server.state.ConfigMergeHelper.ThreeWayValue;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
 import org.apache.commons.lang.StringUtils;
@@ -47,6 +51,7 @@ import org.apache.commons.lang.StringUtils;
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
 import com.google.inject.Inject;
+import com.google.inject.Provider;
 
 /**
  * The {@link ConfigureAction} is used to alter a configuration property during
@@ -80,6 +85,13 @@ public class ConfigureAction extends AbstractServerAction {
   @Inject
   private Configuration m_configuration;
 
+  /**
+   * Used to look up stack properties, which are the configuration properties
+   * defined on the stack.
+   */
+  @Inject
+  private Provider<AmbariMetaInfo> m_ambariMetaInfo;
+
   @Inject
   private ConfigMergeHelper m_mergeHelper;
 
@@ -184,6 +196,7 @@ public class ConfigureAction extends AbstractServerAction {
     boolean changedValues = false;
 
     // !!! do transfers first before setting defined values
+    StringBuilder outputBuffer = new StringBuilder(250);
     for (ConfigureTask.Transfer transfer : transfers) {
       switch (transfer.operation) {
         case COPY:
@@ -194,9 +207,16 @@ public class ConfigureAction extends AbstractServerAction {
             if (base.containsKey(transfer.fromKey)) {
               newValues.put(transfer.toKey, base.get(transfer.fromKey));
               changedValues = true;
+
+              // append standard output
+              outputBuffer.append(MessageFormat.format("Copied {0}/{1}\n", configType, key));
             } else if (StringUtils.isNotBlank(transfer.defaultValue)) {
               newValues.put(transfer.toKey, transfer.defaultValue);
               changedValues = true;
+
+              // append standard output
+              outputBuffer.append(MessageFormat.format("Created {0}/{1} with default value {2}\n",
+                  configType, transfer.toKey, transfer.defaultValue));
             }
           } else {
             // !!! copying from another configuration
@@ -208,9 +228,18 @@ public class ConfigureAction extends AbstractServerAction {
               if (otherValues.containsKey(transfer.fromKey)) {
                 newValues.put(transfer.toKey, otherValues.get(transfer.fromKey));
                 changedValues = true;
+
+                // append standard output
+                outputBuffer.append(MessageFormat.format("Copied {0}/{1} to {2}\n",
+                    transfer.fromType, transfer.fromKey, configType));
               } else if (StringUtils.isNotBlank(transfer.defaultValue)) {
                 newValues.put(transfer.toKey, transfer.defaultValue);
                 changedValues = true;
+
+                // append standard output
+                outputBuffer.append(MessageFormat.format(
+                    "Created {0}/{1} with default value {2}\n", configType, transfer.toKey,
+                    transfer.defaultValue));
               }
             }
           }
@@ -222,9 +251,17 @@ public class ConfigureAction extends AbstractServerAction {
           if (newValues.containsKey(transfer.fromKey)) {
             newValues.put(transfer.toKey, newValues.remove(transfer.fromKey));
             changedValues = true;
+
+            // append standard output
+            outputBuffer.append(MessageFormat.format("Renamed {0}/{1} to {2}/{3}\n", configType,
+                transfer.fromKey, configType, transfer.toKey));
           } else if (StringUtils.isNotBlank(transfer.defaultValue)) {
             newValues.put(transfer.toKey, transfer.defaultValue);
             changedValues = true;
+
+            // append standard output
+            outputBuffer.append(MessageFormat.format("Created {0}/{1} with default value {2}\n",
+                configType, transfer.toKey, transfer.defaultValue));
           }
 
           break;
@@ -232,22 +269,39 @@ public class ConfigureAction extends AbstractServerAction {
           if ("*".equals(transfer.deleteKey)) {
             newValues.clear();
 
+            // append standard output
+            outputBuffer.append(MessageFormat.format("Deleted all keys from {0}\n", configType));
+
             for (String keeper : transfer.keepKeys) {
               newValues.put(keeper, base.get(keeper));
+
+              // append standard output
+              outputBuffer.append(MessageFormat.format("Preserved {0}/{1} after delete\n",
+                  configType, keeper));
             }
 
-            // !!! with preserved edits, find the values that are different
-            // from the stack-defined and keep them
+            // !!! with preserved edits, find the values that are different from
+            // the stack-defined and keep them - also keep values that exist in
+            // the config but not on the stack
             if (transfer.preserveEdits) {
-              List<String> edited = findChangedValues(clusterName, config);
+              List<String> edited = findValuesToPreserve(clusterName, config);
               for (String changed : edited) {
                 newValues.put(changed, base.get(changed));
+
+                // append standard output
+                outputBuffer.append(MessageFormat.format("Preserved {0}/{1} after delete\n",
+                    configType, changed));
               }
             }
+
             changedValues = true;
           } else {
             newValues.remove(transfer.deleteKey);
             changedValues = true;
+
+            // append standard output
+            outputBuffer.append(MessageFormat.format("Deleted {0}/{1}\n", configType,
+                transfer.deleteKey));
           }
 
           break;
@@ -274,6 +328,7 @@ public class ConfigureAction extends AbstractServerAction {
       // the configure being able to take a list of transfers without a
       // key/value to set
       newValues.put(key, value);
+      outputBuffer.append(MessageFormat.format("{0}/{1} changed to {2}\n", configType, key, value));
     }
 
     // !!! check to see if we're going to a new stack and double check the
@@ -283,8 +338,7 @@ public class ConfigureAction extends AbstractServerAction {
       config.setProperties(newValues);
       config.persist(false);
 
-      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
-          MessageFormat.format("Updated configuration ''{0}''", configType), "");
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
     }
 
     // !!! values are different and within the same stack.  create a new
@@ -308,37 +362,85 @@ public class ConfigureAction extends AbstractServerAction {
 
 
   /**
-   * @param clusterName the cluster name
-   * @param config      the config with the tag to find conflicts
-   * @return            the list of changed property keys
+   * Finds the values that should be preserved during a delete. This includes:
+   * <ul>
+   * <li>Properties that existed on the stack but were changed to a different
+   * value</li>
+   * <li>Properties that do not exist on the stack</li>
+   * </ul>
+   *
+   * @param clusterName
+   *          the cluster name
+   * @param config
+   *          the config with the tag to find conflicts
+   * @return the list of changed property keys
    * @throws AmbariException
    */
-  private List<String> findChangedValues(String clusterName, Config config)
+  private List<String> findValuesToPreserve(String clusterName, Config config)
       throws AmbariException {
+    List<String> result = new ArrayList<String>();
 
     Map<String, Map<String, ThreeWayValue>> conflicts =
         m_mergeHelper.getConflicts(clusterName, config.getStackId());
 
     Map<String, ThreeWayValue> conflictMap = conflicts.get(config.getType());
 
-    if (null == conflictMap || conflictMap.isEmpty()) {
-      return Collections.emptyList();
+    // process the conflicts, if any, and add them to the list
+    if (null != conflictMap && !conflictMap.isEmpty()) {
+      for (Map.Entry<String, ThreeWayValue> entry : conflictMap.entrySet()) {
+        ThreeWayValue twv = entry.getValue();
+        if (null == twv.oldStackValue) {
+          result.add(entry.getKey());
+        } else if (null != twv.savedValue && !twv.oldStackValue.equals(twv.savedValue)) {
+          result.add(entry.getKey());
+        }
+      }
     }
 
-    List<String> result = new ArrayList<String>();
 
-    for (Map.Entry<String, ThreeWayValue> entry : conflictMap.entrySet()) {
-      ThreeWayValue twv = entry.getValue();
-      if (null == twv.oldStackValue) {
-        result.add(entry.getKey());
-      } else if (null != twv.savedValue && !twv.oldStackValue.equals(twv.savedValue)) {
-        result.add(entry.getKey());
+    String configType = config.getType();
+    Cluster cluster = m_clusters.getCluster(clusterName);
+    StackId oldStack = cluster.getCurrentStackVersion();
+
+    // iterate over all properties for every cluster service; if the property
+    // has the correct config type (i.e. oozie-site or hdfs-site) then add it to
+    // the list of original stack properties
+    Set<String> stackPropertiesForType = new HashSet<String>(50);
+    for (String serviceName : cluster.getServices().keySet()) {
+      Set<PropertyInfo> serviceProperties = m_ambariMetaInfo.get().getServiceProperties(
+          oldStack.getStackName(), oldStack.getStackVersion(), serviceName);
+
+      for (PropertyInfo property : serviceProperties) {
+        String type = ConfigHelper.fileNameToConfigType(property.getFilename());
+        if (type.equals(configType)) {
+          stackPropertiesForType.add(property.getName());
+        }
       }
     }
 
-    return result;
-  }
+    // now iterate over all stack properties, adding them to the list if they
+    // match
+    Set<PropertyInfo> stackProperties = m_ambariMetaInfo.get().getStackProperties(
+        oldStack.getStackName(),
+        oldStack.getStackVersion());
 
+    for (PropertyInfo property : stackProperties) {
+      String type = ConfigHelper.fileNameToConfigType(property.getFilename());
+      if (type.equals(configType)) {
+        stackPropertiesForType.add(property.getName());
+      }
+    }
 
+    // see if any keys exist in the old config but not in the original stack
+    // for this config type; that means they were added and should be preserved
+    Map<String, String> base = config.getProperties();
+    Set<String> baseKeys = base.keySet();
+    for( String baseKey : baseKeys ){
+      if (!stackPropertiesForType.contains(baseKey)) {
+        result.add(baseKey);
+      }
+    }
 
+    return result;
+  }
 }
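
The net behavior of a DELETE * transfer with preserve-edits can be modeled apart from the Java above. A simplified Python sketch with invented sample data (the real code additionally consults ConfigMergeHelper's three-way conflicts and both service- and stack-level property definitions):

def preserve_on_delete(current_config, stack_defaults, keep_keys):
    # start from an empty config, keeping only the explicitly listed keys
    new_values = {k: current_config[k] for k in keep_keys if k in current_config}
    for key, value in current_config.items():
        if key not in stack_defaults:
            new_values[key] = value   # user addition: never defined by the stack
        elif stack_defaults[key] != value:
            new_values[key] = value   # user edit: differs from the stack default
    return new_values

current = {"tickTime": "2000", "dataDir": "/grid/zk", "custom.prop": "x"}
stack   = {"tickTime": "2000", "dataDir": "/hadoop/zookeeper"}
print(preserve_on_delete(current, stack, keep_keys=[]))
# {'dataDir': '/grid/zk', 'custom.prop': 'x'} -- tickTime is dropped because
# it still matches the stack default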

+ 1 - 5
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigMergeHelper.java

@@ -27,7 +27,6 @@ import java.util.regex.Pattern;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,9 +50,6 @@ public class ConfigMergeHelper {
   @Inject
   private Provider<AmbariMetaInfo> m_ambariMetaInfo;
 
-  @Inject
-  private Provider<RepositoryVersionDAO> repositoryVersionDaoProvider;
-
   @SuppressWarnings("unchecked")
   public Map<String, Map<String, ThreeWayValue>> getConflicts(String clusterName, StackId targetStack) throws AmbariException {
     Cluster cluster = m_clusters.get().getCluster(clusterName);
@@ -98,7 +94,7 @@ public class ConfigMergeHelper {
       if (null != config) {
         Set<String> valueKeys = config.getProperties().keySet();
 
-        customValueKeys = (Collection<String>) CollectionUtils.subtract(valueKeys, oldPairs.keySet());
+        customValueKeys = CollectionUtils.subtract(valueKeys, oldPairs.keySet());
       }
 
       if (null != customValueKeys) {

+ 2 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py

@@ -19,6 +19,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from falcon import falcon
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -52,7 +53,7 @@ class FalconClientLinux(FalconClient):
 
     Logger.info("Executing Falcon Client Rolling Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
-    Execute(format("hdp-select set falcon-client {version}"))
+    hdp_select.select("falcon-client", params.version)
 
   def security_status(self, env):
     import status_params
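
Each of the service-script diffs that follow applies the same two-line substitution; the pattern, shown once as a sketch (assumes the agent script context, where a params module is populated from the command JSON):

import params  # agent-side module carrying version, stack_name, etc.
from resource_management.libraries.functions import hdp_select

# before: Execute(format("hdp-select set falcon-client {version}"))
# after:  the shared helper, which also reloads the param modules
hdp_select.select("falcon-client", params.version)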

+ 2 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py

@@ -21,6 +21,7 @@ import falcon_server_upgrade
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import *
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -75,7 +76,7 @@ class FalconServerLinux(FalconServer):
 
     Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
-    Execute(format("hdp-select set falcon-server {version}"))
+    hdp_select.select("falcon-server", params.version)
     falcon_server_upgrade.pre_start_restore()
 
   def security_status(self, env):

+ 4 - 3
ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py

@@ -23,9 +23,10 @@ from flume import flume
 from flume import get_desired_state
 
 from resource_management import *
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
 from resource_management.libraries.functions.flume_agent_helper import get_flume_status
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+
 import service_mapping
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -114,11 +115,11 @@ class FlumeHandler(Script):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
+    if not params.version or Script.is_hdp_stack_less_than("2.2"):
       return
 
     Logger.info("Executing Flume Rolling Upgrade pre-restart")
-    Execute(format("hdp-select set flume-server {version}"))
+    hdp_select.select("flume-server", params.version)
     flume_upgrade.pre_start_restore()
 
 if __name__ == "__main__":

+ 3 - 2
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py

@@ -21,6 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from hbase import hbase
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -56,13 +57,13 @@ class HbaseClientDefault(HbaseClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hbase", params.version)
-      Execute(format("hdp-select set hbase-client {version}"))
+      hdp_select.select("hbase-client", params.version)
 
       # set all of the hadoop clients since hbase client is upgraded as part
       # of the final "CLIENTS" group and we need to ensure that hadoop-client
       # is also set
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

+ 2 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py

@@ -22,6 +22,7 @@ from resource_management import *
 from resource_management.core.resources.system import Execute
 from resource_management.core import shell
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.decorator import retry
 
@@ -30,7 +31,7 @@ def prestart(env, hdp_component):
 
   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "hbase", params.version)
-    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
+    hdp_select.select(hdp_component, params.version)
 
 def post_regionserver(env):
   import params

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py

@@ -20,6 +20,7 @@ import datanode_upgrade
 from hdfs_datanode import datanode
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
@@ -73,7 +74,7 @@ class DataNodeDefault(DataNode):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-hdfs-datanode {version}"))
+      hdp_select.select("hadoop-hdfs-datanode", params.version)
 
   def post_rolling_restart(self, env):
     Logger.info("Executing DataNode Rolling Upgrade post-restart")

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py

@@ -19,6 +19,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -61,7 +62,7 @@ class HdfsClientDefault(HdfsClient):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
   def security_status(self, env):
     import status_params

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py

@@ -19,6 +19,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, \
   format_hdp_stack_version
 from resource_management.libraries.functions.format import format
@@ -51,7 +52,7 @@ class JournalNodeDefault(JournalNode):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-hdfs-journalnode {version}"))
+      hdp_select.select("hadoop-hdfs-journalnode", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py

@@ -23,6 +23,7 @@ import json
 import  tempfile
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -111,7 +112,7 @@ class NameNodeDefault(NameNode):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-hdfs-namenode {version}"))
+      hdp_select.select("hadoop-hdfs-namenode", params.version)
 
   def post_rolling_restart(self, env):
     Logger.info("Executing Rolling Upgrade post-restart")

+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py

@@ -20,6 +20,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from hive import hive
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
@@ -55,7 +56,7 @@ class HiveClientDefault(HiveClient):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py

@@ -21,6 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -78,7 +79,7 @@ class HiveMetastoreDefault(HiveMetastore):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hive", params.version)
-      Execute(format("hdp-select set hive-metastore {version}"))
+      hdp_select.select("hive-metastore", params.version)
 
   def security_status(self, env):
     import status_params

+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py

@@ -23,6 +23,7 @@ from resource_management import *
 from hive import hive
 from hive_service import hive_service
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -98,7 +99,7 @@ class HiveServerDefault(HiveServer):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hive", params.version)
-      Execute(format("hdp-select set hive-server2 {version}"))
+      hdp_select.select("hive-server2", params.version)
       params.HdfsResource(InlineTemplate(params.mapreduce_tar_destination).get_content(),
                           type="file",
                           action="create_on_execute",

+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py

@@ -20,6 +20,7 @@ Ambari Agent
 """
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -77,7 +78,7 @@ class WebHCatServerDefault(WebHCatServer):
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hive-webhcat {version}"))
+      hdp_select.select("hive-webhcat", params.version)
 
   def security_status(self, env):
     import status_params

+ 2 - 1
ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/upgrade.py

@@ -21,6 +21,7 @@ limitations under the License.
 from resource_management import *
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 
 def prestart(env, hdp_component):
@@ -28,4 +29,4 @@ def prestart(env, hdp_component):
 
   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "kafka", params.version)
-    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
+    hdp_select.select(hdp_component, params.version)

+ 2 - 1
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py

@@ -20,6 +20,7 @@ limitations under the License.
 from resource_management import *
 
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, validate_security_config_properties, get_params_from_filesystem, \
   FILE_TYPE_XML
@@ -99,7 +100,7 @@ class KnoxGatewayDefault(KnoxGateway):
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       upgrade.backup_data()
       conf_select.select(params.stack_name, "knox", params.version)
-      Execute(format("hdp-select set knox-server {version}"))
+      hdp_select.select("knox-server", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py

@@ -21,6 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 
 from oozie import oozie
 from oozie_service import oozie_service
@@ -57,7 +58,7 @@ class OozieClient(Script):
 
     Logger.info("Executing Oozie Client Rolling Upgrade pre-restart")
     conf_select.select(params.stack_name, "oozie", params.version)
-    Execute(format("hdp-select set oozie-client {version}"))
+    hdp_select.select("oozie-client", params.version)
 
   # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
   # This function returns changed configs (it's used for config generation before config download)

+ 2 - 1
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py

@@ -26,6 +26,7 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import compare_versions
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations
 from resource_management.libraries.functions.security_commons import cached_kinit_executor
@@ -161,7 +162,7 @@ class OozieServerDefault(OozieServer):
     oozie_server_upgrade.backup_configuration()
 
     conf_select.select(params.stack_name, "oozie", params.version)
-    Execute(format("hdp-select set oozie-server {version}"))
+    hdp_select.select("oozie-server", params.version)
 
     oozie_server_upgrade.restore_configuration()
     oozie_server_upgrade.prepare_libext_directory()

+ 2 - 1
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py

@@ -23,6 +23,7 @@ import sys
 import os
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from pig import pig
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -47,7 +48,7 @@ class PigClientLinux(PigClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
   def install(self, env):
     self.install_packages(env)

+ 2 - 1
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py

@@ -20,6 +20,7 @@ limitations under the License.
 """
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.format import format
 
 def prestart(env, hdp_component):
@@ -27,4 +28,4 @@ def prestart(env, hdp_component):
 
   if params.version and params.stack_is_hdp22_or_further:
     conf_select.select(params.stack_name, hdp_component, params.version)
-    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
+    hdp_select.select(hdp_component, params.version)

+ 5 - 4
ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py

@@ -20,6 +20,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from slider import slider
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -36,14 +37,14 @@ class SliderClient(Script):
     env.set_params(params)
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      conf_select.select(params.stack_name, "slider", params.version)      
-      Execute(format("hdp-select set slider-client {version}"))
+      conf_select.select(params.stack_name, "slider", params.version)
+      hdp_select.select("slider-client", params.version)
 
       # also set all of the hadoop clients since slider client is upgraded as
       # part of the final "CLIENTS" group and we need to ensure that
       # hadoop-client is also set
-      conf_select.select(params.stack_name, "hadoop", params.version)      
-      Execute(format("hdp-select set hadoop-client {version}"))
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      hdp_select.select("hadoop-client", params.version)
 
   @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
   def install(self, env):

+ 3 - 1
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py

@@ -21,6 +21,7 @@ limitations under the License.
 import sys
 import os
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.check_process_status import check_process_status
@@ -75,7 +76,8 @@ class JobHistoryServer(Script):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "spark", params.version)
-      Execute(format("hdp-select set spark-historyserver {version}"))
+      hdp_select.select("spark-historyserver", params.version)
+
       params.HdfsResource(InlineTemplate(params.tez_tar_destination).get_content(),
                           type="file",
                           action="create_on_execute",

+ 2 - 1
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py

@@ -21,6 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
@@ -51,7 +52,7 @@ class SparkClient(Script):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "spark", params.version)
-      Execute(format("hdp-select set spark-client {version}"))
+      hdp_select.select("spark-client", params.version)
 
 if __name__ == "__main__":
   SparkClient().execute()

+ 3 - 1
ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py

@@ -22,6 +22,7 @@ from resource_management.core.exceptions import ClientComponentHasNoStatus
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from sqoop import sqoop
@@ -52,7 +53,8 @@ class SqoopClientDefault(SqoopClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "sqoop", params.version)
-      Execute(format("hdp-select set sqoop-client {version}"))
+      hdp_select.select("sqoop-client", params.version)
+
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class SqoopClientWindows(SqoopClient):

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py

@@ -22,6 +22,7 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -53,7 +54,7 @@ class DrpcServer(Script):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-client {version}"))
+      hdp_select.select("storm-client", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py

@@ -23,6 +23,7 @@ from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from storm import storm
@@ -56,7 +57,7 @@ class NimbusDefault(Nimbus):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-nimbus {version}"))
+      hdp_select.select("storm-nimbus", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py

@@ -23,6 +23,7 @@ from resource_management.libraries.script import Script
 from storm import storm
 from supervisord_service import supervisord_service, supervisord_check_status
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -48,7 +49,7 @@ class Nimbus(Script):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-nimbus {version}"))
+      hdp_select.select("storm-nimbus", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py

@@ -22,6 +22,7 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -52,7 +53,7 @@ class StormRestApi(Script):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-client {version}"))
+      hdp_select.select("storm-client", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py

@@ -22,6 +22,7 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -74,7 +75,7 @@ class SupervisorDefault(Supervisor):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-supervisor {version}"))
+      hdp_select.select("storm-supervisor", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py

@@ -24,6 +24,7 @@ from service import service
 from supervisord_service import supervisord_service, supervisord_check_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -49,7 +50,7 @@ class Supervisor(Script):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-supervisor {version}"))
+      hdp_select.select("storm-supervisor", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py

@@ -25,6 +25,7 @@ from service_check import ServiceCheck
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
@@ -77,7 +78,7 @@ class UiServerDefault(UiServer):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      Execute(format("hdp-select set storm-client {version}"))
+      hdp_select.select("storm-client", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py

@@ -22,6 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from tez import tez
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -47,7 +48,7 @@ class TezClientLinux(TezClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
   def install(self, env):
     self.install_packages(env)

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py

@@ -21,6 +21,7 @@ Ambari Agent
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
@@ -71,7 +72,7 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-yarn-timelineserver {version}"))
+      hdp_select.select("hadoop-yarn-timelineserver", params.version)
 
   def status(self, env):
     import status_params

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py

@@ -21,6 +21,7 @@ Ambari Agent
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
@@ -71,7 +72,7 @@ class HistoryServerDefault(HistoryServer):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-mapreduce-historyserver {version}"))
+      hdp_select.select("hadoop-mapreduce-historyserver", params.version)
       params.HdfsResource(InlineTemplate(params.mapreduce_tar_destination).get_content(),
                           type="file",
                           action="create_on_execute",

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py

@@ -22,6 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from yarn import yarn
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -57,7 +58,7 @@ class MapReduce2ClientDefault(MapReduce2Client):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py

@@ -23,6 +23,7 @@ import nodemanager_upgrade
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
@@ -73,7 +74,7 @@ class NodemanagerDefault(Nodemanager):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-yarn-nodemanager {version}"))
+      hdp_select.select("hadoop-yarn-nodemanager", params.version)
 
   def post_rolling_restart(self, env):
     Logger.info("Executing NodeManager Rolling Upgrade post-restart")

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py

@@ -21,6 +21,7 @@ Ambari Agent
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -95,7 +96,7 @@ class ResourcemanagerDefault(Resourcemanager):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-yarn-resourcemanager {version}"))
+      hdp_select.select("hadoop-yarn-resourcemanager", params.version)
 
   def start(self, env, rolling_restart=False):
     import params

+ 2 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py

@@ -22,6 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from yarn import yarn
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -57,7 +58,7 @@ class YarnClientDefault(YarnClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      Execute(format("hdp-select set hadoop-client {version}"))
+      hdp_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

+ 2 - 1
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py

@@ -23,6 +23,7 @@ import sys
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 
@@ -75,7 +76,7 @@ def zookeeper(type = None, rolling_restart = False):
     # This path may be missing after Ambari upgrade. We need to create it.
     if (not rolling_restart) and (not os.path.exists("/usr/hdp/current/zookeeper-server")) and params.current_version:
      conf_select.select(params.stack_name, "zookeeper", params.current_version)
-      Execute(format("hdp-select set zookeeper-server {current_version}"))
+      hdp_select.select("zookeeper-server", params.version)
 
   if (params.log4j_props != None):
     File(os.path.join(params.config_dir, "log4j.properties"),

+ 2 - 1
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py

@@ -22,6 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from ambari_commons import OSConst
@@ -66,7 +67,7 @@ class ZookeeperClientLinux(ZookeeperClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      Execute(format("hdp-select set zookeeper-client {version}"))
+      hdp_select.select("zookeeper-client", params.version)
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ZookeeperClientWindows(ZookeeperClient):

+ 2 - 1
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py

@@ -24,6 +24,7 @@ import sys
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -76,7 +77,7 @@ class ZookeeperServerLinux(ZookeeperServer):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      Execute(format("hdp-select set zookeeper-server {version}"))
+      hdp_select.select("zookeeper-server", params.version)
 
   def post_rolling_restart(self, env):
     Logger.info("Executing Rolling Upgrade post-restart")

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java

@@ -155,7 +155,7 @@ public class ClientRetryPropertyCheckTest {
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
     // pass with right property
-    properties.put("template", "foo bar baz -Doozie.connection.retry.count=5 foobarbaz");
+    properties.put("content", "foo bar baz -Doozie.connection.retry.count=5 foobarbaz");
     check = new PrerequisiteCheck(null, null);
     m_check.perform(check, new PrereqCheckRequest("cluster"));
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());

+ 180 - 63
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java

@@ -28,6 +28,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
@@ -47,8 +49,11 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
 import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
@@ -85,6 +90,12 @@ public class ConfigureActionTest {
   @Inject
   private HostRoleCommandFactory hostRoleCommandFactory;
 
+  @Inject
+  private ServiceFactory serviceFactory;
+
+  @Inject
+  ConfigHelper m_configHelper;
+
   @Before
   public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -97,84 +108,82 @@ public class ConfigureActionTest {
     m_injector.getInstance(PersistService.class).stop();
   }
 
-  private void makeUpgradeCluster() throws Exception {
-    String clusterName = "c1";
-    String hostName = "h1";
-
-    Clusters clusters = m_injector.getInstance(Clusters.class);
-    clusters.addCluster(clusterName, HDP_21_STACK);
-
-    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
-    StackEntity stackEntity = stackDAO.find(HDP_21_STACK.getStackName(),
-        HDP_21_STACK.getStackVersion());
-
-    assertNotNull(stackEntity);
+  @Test
+  public void testConfigActionUpgradeAcrossStack() throws Exception {
+    makeUpgradeCluster();
 
-    Cluster c = clusters.getCluster(clusterName);
-    c.setDesiredStackVersion(HDP_21_STACK);
+    Cluster c = m_injector.getInstance(Clusters.class).getCluster("c1");
+    assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
+    c.setDesiredStackVersion(HDP_22_STACK);
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
     Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
           put("initLimit", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version1");
+    config.setTag("version2");
     config.persist();
 
     c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
+    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    // add a host component
-    clusters.addHost(hostName);
-
-    Host host = clusters.getHost(hostName);
 
-    Map<String, String> hostAttributes = new HashMap<String, String>();
-    hostAttributes.put("os_family", "redhat");
-    hostAttributes.put("os_release_version", "6");
-    host.setHostAttributes(hostAttributes);
-    host.persist();
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put("upgrade_direction", "upgrade");
+    commandParams.put("version", HDP_2_2_1_0);
+    commandParams.put("clusterName", "c1");
+    commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
+    commandParams.put(ConfigureTask.PARAMETER_KEY, "initLimit");
+    commandParams.put(ConfigureTask.PARAMETER_VALUE, "11");
 
-    String urlInfo = "[{'repositories':["
-        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'HDP-2.1.1'}"
-        + "], 'OperatingSystems/os_type':'redhat6'}]";
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
 
-    m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_2_0_0);
-    repoVersionDAO.create(stackEntity, HDP_2_2_1_0, String.valueOf(System.currentTimeMillis()),
-        "pack", urlInfo);
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
+        null, null);
 
-    c.createClusterVersion(HDP_21_STACK, HDP_2_2_0_0, "admin", RepositoryVersionState.UPGRADING);
-    c.createClusterVersion(HDP_21_STACK, HDP_2_2_1_0, "admin", RepositoryVersionState.INSTALLING);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(
+        executionCommand));
 
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_0_0, RepositoryVersionState.CURRENT);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.INSTALLED);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADING);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADED);
-    c.setCurrentStackVersion(HDP_21_STACK);
+    ConfigureAction action = m_injector.getInstance(ConfigureAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
 
-    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    HostVersionEntity entity = new HostVersionEntity();
-    entity.setHostEntity(hostDAO.findByName(hostName));
-    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_21_STACK, HDP_2_2_1_0));
-    entity.setState(RepositoryVersionState.UPGRADED);
-    hostVersionDAO.create(entity);
+    config = c.getDesiredConfigByType("zoo.cfg");
+    assertNotNull(config);
+    assertEquals("version2", config.getTag());
+    assertEquals("11", config.getProperties().get("initLimit"));
   }
 
+  /**
+   * Tests that a DELETE of "*" preserves user edits and additions.
+   *
+   * @throws Exception
+   */
   @Test
-  public void testConfigActionUpgradeAcrossStack() throws Exception {
+  public void testDeletePreserveChanges() throws Exception {
     makeUpgradeCluster();
 
     Cluster c = m_injector.getInstance(Clusters.class).getCluster("c1");
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
-    c.setDesiredStackVersion(HDP_22_STACK);
+    c.setDesiredStackVersion(HDP_21_STACK);
+
+    // create a config for zoo.cfg with two values; one is a stack value and the
+    // other is custom
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
-          put("initLimit", "10");
-        }}, new HashMap<String, Map<String,String>>());
+    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+      {
+        put("tickTime", "2000");
+        put("foo", "bar");
+      }
+    }, new HashMap<String, Map<String, String>>());
     config.setTag("version2");
     config.persist();
 
@@ -182,24 +191,30 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-
     Map<String, String> commandParams = new HashMap<String, String>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_1_0);
     commandParams.put("clusterName", "c1");
     commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
-    commandParams.put(ConfigureTask.PARAMETER_KEY, "initLimit");
-    commandParams.put(ConfigureTask.PARAMETER_VALUE, "11");
+
+    // delete all keys, preserving edits or additions
+    List<ConfigureTask.Transfer> transfers = new ArrayList<ConfigureTask.Transfer>();
+    ConfigureTask.Transfer transfer = new ConfigureTask.Transfer();
+    transfer.operation = TransferOperation.DELETE;
+    transfer.deleteKey = "*";
+    transfer.preserveEdits = true;
+    transfers.add(transfer);
+
+    commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     executionCommand.setClusterName("c1");
+    executionCommand.setRoleParams(new HashMap<String, String>());
+    executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
 
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
-        null, null);
-
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(
-        executionCommand));
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
 
     ConfigureAction action = m_injector.getInstance(ConfigureAction.class);
     action.setExecutionCommand(executionCommand);
@@ -208,12 +223,17 @@ public class ConfigureActionTest {
     CommandReport report = action.execute(null);
     assertNotNull(report);
 
-    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
-
+    // make sure there are now 3 versions after the merge
+    assertEquals(3, c.getConfigsByType("zoo.cfg").size());
     config = c.getDesiredConfigByType("zoo.cfg");
     assertNotNull(config);
-    assertEquals("version2", config.getTag());
-    assertEquals("11", config.getProperties().get("initLimit"));
+    assertFalse("version2".equals(config.getTag()));
+
+    // time to check our values; there should only be 1 left since tickTime was
+    // removed
+    Map<String, String> map = config.getProperties();
+    assertEquals("bar", map.get("foo"));
+    assertFalse(map.containsKey("tickTime"));
   }
 
   @Test
@@ -338,9 +358,106 @@ public class ConfigureActionTest {
     assertEquals(4, c.getConfigsByType("zoo.cfg").size());
     config = c.getDesiredConfigByType("zoo.cfg");
     map = config.getProperties();
-    assertEquals(2, map.size());
+    assertEquals(6, map.size());
     assertTrue(map.containsKey("initLimit")); // it just changed to 11 from 10
     assertTrue(map.containsKey("copyKey")); // is new
   }
 
+  private void makeUpgradeCluster() throws Exception {
+    String clusterName = "c1";
+    String hostName = "h1";
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+    clusters.addCluster(clusterName, HDP_21_STACK);
+
+    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find(HDP_21_STACK.getStackName(),
+        HDP_21_STACK.getStackVersion());
+
+    assertNotNull(stackEntity);
+
+    Cluster c = clusters.getCluster(clusterName);
+    c.setDesiredStackVersion(HDP_21_STACK);
+
+    // !!! very important, otherwise the loops that walk the list of installed
+    // service properties will not run!
+    installService(c, "ZOOKEEPER");
+
+    ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
+    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+      {
+        put("initLimit", "10");
+      }
+    }, new HashMap<String, Map<String, String>>());
+    config.setTag("version1");
+    config.persist();
+
+    c.addConfig(config);
+    c.addDesiredConfig("user", Collections.singleton(config));
+
+    // add a host component
+    clusters.addHost(hostName);
+
+    Host host = clusters.getHost(hostName);
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6");
+    host.setHostAttributes(hostAttributes);
+    host.persist();
+
+    String urlInfo = "[{'repositories':["
+        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'HDP-2.1.1'}"
+        + "], 'OperatingSystems/os_type':'redhat6'}]";
+
+    m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_2_0_0);
+    repoVersionDAO.create(stackEntity, HDP_2_2_1_0, String.valueOf(System.currentTimeMillis()),
+        "pack", urlInfo);
+
+    c.createClusterVersion(HDP_21_STACK, HDP_2_2_0_0, "admin", RepositoryVersionState.UPGRADING);
+    c.createClusterVersion(HDP_21_STACK, HDP_2_2_1_0, "admin", RepositoryVersionState.INSTALLING);
+
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_0_0, RepositoryVersionState.CURRENT);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADED);
+    c.setCurrentStackVersion(HDP_21_STACK);
+
+    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
+        RepositoryVersionState.CURRENT);
+
+    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+
+    HostVersionEntity entity = new HostVersionEntity();
+    entity.setHostEntity(hostDAO.findByName(hostName));
+    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_21_STACK, HDP_2_2_1_0));
+    entity.setState(RepositoryVersionState.UPGRADED);
+    hostVersionDAO.create(entity);
+
+    // verify that our configs are there
+    String tickTime = m_configHelper.getPropertyValueFromStackDefinitions(c, "zoo.cfg", "tickTime");
+    assertNotNull(tickTime);
+  }
+
+  /**
+   * Installs a service in the cluster.
+   *
+   * @param cluster the cluster to install the service into
+   * @param serviceName the name of the service to install
+   * @return the new service, or the existing one if already installed
+   * @throws AmbariException
+   */
+  private Service installService(Cluster cluster, String serviceName) throws AmbariException {
+    Service service = null;
+
+    try {
+      service = cluster.getService(serviceName);
+    } catch (ServiceNotFoundException e) {
+      service = serviceFactory.createNew(cluster, serviceName);
+      cluster.addService(service);
+      service.persist();
+    }
+
+    return service;
+  }
 }
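
The preserve-edits behavior exercised by testDeletePreserveChanges comes down to one rule: when a transfer deletes "*", a property survives only if the user added it or edited it away from the stack default. A rough Python sketch of that rule, with illustrative names rather than the Java logic in ConfigureAction:

def delete_preserving_edits(current, stack_defaults):
  # Drop every property whose value still matches the stack default;
  # keep user additions and user edits.
  return dict((k, v) for k, v in current.items()
              if k not in stack_defaults or stack_defaults[k] != v)

# Mirrors the test: tickTime matches the stack default of 2000 and is
# deleted, while the custom "foo" is preserved.
print(delete_preserving_edits({"tickTime": "2000", "foo": "bar"},
                              {"tickTime": "2000", "initLimit": "10"}))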

+ 35 - 0
ambari-server/src/test/python/TestUtils.py

@@ -191,3 +191,38 @@ class TestUtils(TestCase):
     isfile_mock.return_value = True
 
     self.assertEquals(utils.check_exitcode("/tmp/nofile"), 777)
+
+
+  def test_format_with_reload(self):
+    from resource_management.libraries.functions import format
+    from resource_management.libraries.functions.format import ConfigurationFormatter
+    from resource_management.core.environment import Environment
+
+    env = Environment()
+    env._instances.append(env)
+
+
+    # declare some environment variables
+    env_params = {}
+    env_params["envfoo"] = "env-foo1"
+    env_params["envbar"] = "env-bar1"
+    env.config.params = env_params
+
+    # declare some local variables
+    foo = "foo1"
+    bar = "bar1"
+
+    # make sure local variables and env variables work
+    message = "{foo} {bar} {envfoo} {envbar}"
+    formatted_message = format(message)
+    self.assertEquals("foo1 bar1 env-foo1 env-bar1", formatted_message)
+
+    # try the same thing with an instance; we pass in keyword args to be
+    # combined with the env params
+    formatter = ConfigurationFormatter()
+    formatted_message = formatter.format(message, foo="foo2", bar="bar2")
+    self.assertEquals("foo2 bar2 env-foo1 env-bar1", formatted_message)
+
+    # now supply keyword args to override env params
+    formatted_message = formatter.format(message, envfoo="foobar", envbar="foobarbaz", foo="foo3", bar="bar3")
+    self.assertEquals("foo3 bar3 foobar foobarbaz", formatted_message)

+ 3 - 1
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -1263,6 +1263,8 @@ class TestNamenode(RMFTestCase):
 
   @patch("resource_management.core.shell.call")
   def test_pre_rolling_restart_23_params(self, call_mock):
+    import itertools
+
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1279,7 +1281,7 @@ class TestNamenode(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
-                       call_mocks = [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)],
+                       call_mocks = itertools.cycle([(0, None)]),
                        mocks_dict = mocks_dict)
     import sys
     self.assertEquals("/usr/hdp/2.3.0.0-1234/hadoop/conf", sys.modules["params"].hadoop_conf_dir)

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py

@@ -995,7 +995,7 @@ class TestOozieServer(RMFTestCase):
       isfile_mock, exists_mock, isdir_mock, tarfile_open_mock):
 
     isdir_mock.return_value = True
-    exists_mock.side_effect = [False,False,True]
+    exists_mock.return_value = False
     isfile_mock.return_value = True
 
     prepare_war_stdout = """INFO: Adding extension: libext/mysql-connector-java.jar

+ 62 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml

@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tickTime</name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <value>10</value>
+    <description>Ticks to allow for sync at Init.</description>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <value>5</value>
+    <description>Ticks to allow for sync at Runtime.</description>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+  </property>
+  <property>
+    <name>dataDir</name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+  </property>
+  <property>
+    <name>autopurge.snapRetainCount</name>
+    <value>30</value>
+    <description>ZooKeeper purge feature retains the autopurge.snapRetainCount
+      most recent snapshots and the corresponding transaction
+      logs in the dataDir and dataLogDir respectively and deletes the rest. </description>
+  </property>
+  <property>
+    <name>autopurge.purgeInterval</name>
+    <value>24</value>
+    <description>The time interval in hours for which the purge task has to be triggered.
+      Set to a positive integer (1 and above) to enable the auto purging.</description>
+  </property>
+</configuration>