Browse Source

AMBARI-12188. Intra stack-version upgrade sends stale configs in commands to hosts (srimanth)

Srimanth Gunturi 10 năm trước
mục cha
commit
1ac5c58e66

+ 34 - 8
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java

@@ -17,22 +17,25 @@
  */
 package org.apache.ambari.server.actionmanager;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
 
 public class ExecutionCommandWrapper {
   @Inject
@@ -50,6 +53,7 @@ public class ExecutionCommandWrapper {
     this.executionCommand = executionCommand;
   }
 
+  @SuppressWarnings("serial")
   public ExecutionCommand getExecutionCommand() {
     if (executionCommand != null) {
       return executionCommand;
@@ -73,10 +77,32 @@ public class ExecutionCommandWrapper {
         try {
           Cluster cluster = clusters.getClusterById(clusterId);
           ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+          Map<String, Map<String, String>> configurationTags = executionCommand.getConfigurationTags();
+
+          // Execution commands have config-tags already set during their creation. However, these
+          // tags become stale at runtime when other ExecutionCommands run and change the desired
+          // configs (like ConfigureAction). Hence an ExecutionCommand can specify which config-types
+          // should be refreshed at runtime. Specifying <code>*</code> will result in all config-type
+          // tags to be refreshed to the latest cluster desired-configs.
+          Set<String> refreshConfigTagsBeforeExecution = executionCommand.getForceRefreshConfigTagsBeforeExecution();
+          if (refreshConfigTagsBeforeExecution != null && !refreshConfigTagsBeforeExecution.isEmpty()) {
+            Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
+            for (String refreshConfigTag : refreshConfigTagsBeforeExecution) {
+              if ("*".equals(refreshConfigTag)) {
+                for (final Entry<String, DesiredConfig> desiredConfig : desiredConfigs.entrySet()) {
+                  configurationTags.put(desiredConfig.getKey(), new HashMap<String, String>() {{
+                    put("tag", desiredConfig.getValue().getTag());
+                  }});
+                }
+                break;
+              } else if (configurationTags.containsKey(refreshConfigTag) && desiredConfigs.containsKey(refreshConfigTag)) {
+                configurationTags.get(refreshConfigTag).put("tag", desiredConfigs.get(refreshConfigTag).getTag());
+              }
+            }
+          }
 
           Map<String, Map<String, String>> configProperties = configHelper
-            .getEffectiveConfigProperties(cluster,
-              executionCommand.getConfigurationTags());
+            .getEffectiveConfigProperties(cluster, configurationTags);
 
           // Apply the configurations saved with the Execution Cmd on top of
           // derived configs - This will take care of all the hacks

+ 22 - 0
ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java

@@ -90,6 +90,9 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("forceRefreshConfigTags")
   private Set<String> forceRefreshConfigTags = new HashSet<String>();
 
+  @SerializedName("forceRefreshConfigTagsBeforeExecution")
+  private Set<String> forceRefreshConfigTagsBeforeExecution = new HashSet<String>();
+
   @SerializedName("commandParams")
   private Map<String, String> commandParams = new HashMap<String, String>();
 
@@ -231,6 +234,19 @@ public class ExecutionCommand extends AgentCommand {
     this.forceRefreshConfigTags = forceRefreshConfigTags;
   }
 
+  /**
+   * Comma-separated list of config-types whose tags have to be refreshed
+   * at runtime before the command is executed. If all config-type tags
+   * have to be refreshed, "*" can be specified.
+   */
+  public Set<String> getForceRefreshConfigTagsBeforeExecution() {
+    return forceRefreshConfigTagsBeforeExecution;
+  }
+
+  public void setForceRefreshConfigTagsBeforeExecution(Set<String> forceRefreshConfigTagsBeforeExecution) {
+    this.forceRefreshConfigTagsBeforeExecution = forceRefreshConfigTagsBeforeExecution;
+  }
+
   public Map<String, Map<String, Map<String, String>>> getConfigurationAttributes() {
     return configurationAttributes;
   }
@@ -332,6 +348,12 @@ public class ExecutionCommand extends AgentCommand {
     String HOST_SYS_PREPPED = "host_sys_prepped";
     String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";
     String COMMAND_RETRY_ENABLED = "command_retry_enabled";
+    /**
+     * Comma-separated list of config-types whose tags have to be refreshed
+     * at runtime before the command is executed. If all config-type tags
+     * have to be refreshed, "*" can be specified.
+     */
+    String REFRESH_CONFIG_TAGS_BEFORE_EXECUTION = "forceRefreshConfigTagsBeforeExecution";
 
     String SERVICE_CHECK = "SERVICE_CHECK"; // TODO: is it standard command? maybe add it to RoleCommand enum?
     String CUSTOM_COMMAND = "custom_command";

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java

@@ -330,6 +330,10 @@ public class AmbariCustomCommandExecutionHelper {
         execCmd.setForceRefreshConfigTags(parseAndValidateComponentsMapping(actionExecutionContext.getParameters().get(KeyNames.REFRESH_ADITIONAL_COMPONENT_TAGS)));
       }
 
+      if(actionExecutionContext.getParameters() != null && actionExecutionContext.getParameters().containsKey(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION)){
+        execCmd.setForceRefreshConfigTagsBeforeExecution(parseAndValidateComponentsMapping(actionExecutionContext.getParameters().get(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION)));
+      }
+
       Map<String, String> hostLevelParams = new TreeMap<String, String>();
 
       hostLevelParams.put(CUSTOM_COMMAND, commandName);
@@ -549,6 +553,9 @@ public class AmbariCustomCommandExecutionHelper {
     execCmd.setConfigurations(configurations);
     execCmd.setConfigurationAttributes(configurationAttributes);
     execCmd.setConfigurationTags(configTags);
+    if(actionParameters != null && actionParameters.containsKey(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION)){
+      execCmd.setForceRefreshConfigTagsBeforeExecution(parseAndValidateComponentsMapping(actionParameters.get(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION)));
+    }
 
     // Generate cluster host info
     execCmd.setClusterHostInfo(
@@ -924,6 +931,10 @@ public class AmbariCustomCommandExecutionHelper {
           actionExecutionContext.getParameters().put(KeyNames.REFRESH_ADITIONAL_COMPONENT_TAGS, requestParams.get(KeyNames.REFRESH_ADITIONAL_COMPONENT_TAGS));
         }
 
+        if(requestParams.containsKey(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION)){
+          actionExecutionContext.getParameters().put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, requestParams.get(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION));
+        }
+
         RequestOperationLevel operationLevel = actionExecutionContext.getOperationLevel();
         if (operationLevel != null) {
           String clusterName = operationLevel.getClusterName();

+ 16 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java

@@ -44,6 +44,7 @@ import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.resources.UpgradeResourceDefinition;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -845,6 +846,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     params.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());
     params.put(COMMAND_PARAM_ORIGINAL_STACK, context.getOriginalStackId().getStackId());
     params.put(COMMAND_PARAM_TARGET_STACK, context.getTargetStackId().getStackId());
+    // Setting REFRESH_CONFIG_TAGS_BEFORE_EXECUTION is required during upgrade to
+    // force a refresh of config-tags just before execution. Generally upgrade packs
+    // have config updates which tend to make the persisted config-tags in the
+    // ExecutionCommand stale.
+    params.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "*");
 
     // Because custom task may end up calling a script/function inside a service, it is necessary to set the
     // service_package_folder and hooks_folder params.
@@ -924,6 +930,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         context.getOriginalStackId().getStackId());
     restartCommandParams.put(COMMAND_PARAM_TARGET_STACK,
         context.getTargetStackId().getStackId());
+    // Setting REFRESH_CONFIG_TAGS_BEFORE_EXECUTION is required during upgrade to
+    // force a refresh of config-tags just before execution. Generally upgrade packs
+    // have config updates which tend to make the persisted config-tags in the
+    // ExecutionCommand stale.
+    restartCommandParams.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "*");
 
     ActionExecutionContext actionContext = new ActionExecutionContext(
         cluster.getClusterName(), "RESTART",
@@ -980,6 +991,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         context.getOriginalStackId().getStackId());
     commandParams.put(COMMAND_PARAM_TARGET_STACK,
         context.getTargetStackId().getStackId());
+    // Setting REFRESH_CONFIG_TAGS_BEFORE_EXECUTION is required during upgrade to
+    // force a refresh of config-tags just before execution. Generally upgrade packs
+    // have config updates which tend to make the persisted config-tags in the
+    // ExecutionCommand stale.
+    commandParams.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "*");
 
     ActionExecutionContext actionContext = new ActionExecutionContext(
         cluster.getClusterName(), "SERVICE_CHECK",

+ 360 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java

@@ -0,0 +1,360 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.dao.UpgradeDAO;
+import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
+import org.apache.ambari.server.orm.entities.UpgradeItemEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
+import org.apache.ambari.server.view.ViewRegistry;
+import org.easymock.EasyMock;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.gson.Gson;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.persist.PersistService;
+import com.google.inject.util.Modules;
+
+/**
+ * UpgradeResourceDefinition tests.
+ */
+public class UpgradeResourceProviderHDP22Test {
+
+  private UpgradeDAO upgradeDao = null;
+  private RepositoryVersionDAO repoVersionDao = null;
+  private Injector injector;
+  private Clusters clusters;
+  private OrmTestHelper helper;
+  private AmbariManagementController amc;
+  private ConfigHelper configHelper;
+  private StackDAO stackDAO;
+
+  private static final String configTagVersion1 = "version1";
+  private static final String configTagVersion2 = "version2";
+  @SuppressWarnings("serial")
+  private static final Map<String, String> configTagVersion1Properties = new HashMap<String, String>() {
+    {
+      put("hive.server2.thrift.port", "10000");
+    }
+  };
+
+  @SuppressWarnings({ "serial", "unchecked" })
+  @Before
+  public void before() throws Exception {
+    // setup the config helper for placeholder resolution
+    configHelper = EasyMock.createNiceMock(ConfigHelper.class);
+
+    expect(configHelper.getPlaceholderValueFromDesiredConfigurations(EasyMock.anyObject(Cluster.class), EasyMock.eq("{{foo/bar}}"))).andReturn(
+        "placeholder-rendered-properly").anyTimes();
+
+    expect(configHelper.getDefaultProperties(EasyMock.anyObject(StackId.class), EasyMock.anyObject(Cluster.class))).andReturn(
+        new HashMap<String, Map<String, String>>()).anyTimes();
+
+    expect(configHelper.getEffectiveConfigAttributes(EasyMock.anyObject(Cluster.class), EasyMock.anyObject(Map.class))).andReturn(
+        new HashMap<String, Map<String, Map<String, String>>>()).anyTimes();
+
+    expect(configHelper.getEffectiveDesiredTags(EasyMock.anyObject(Cluster.class), EasyMock.eq("h1"))).andReturn(new HashMap<String, Map<String, String>>() {
+      {
+        put("hive-site", new HashMap<String, String>() {
+          {
+            put("tag", configTagVersion1);
+          }
+        });
+      }
+    }).anyTimes();
+
+    expect(configHelper.getEffectiveConfigProperties(EasyMock.anyObject(Cluster.class), EasyMock.anyObject(Map.class))).andReturn(
+        new HashMap<String, Map<String, String>>() {
+          {
+            put("hive-site", configTagVersion1Properties);
+          }
+        }).anyTimes();
+
+    EasyMock.replay(configHelper);
+
+    // create an injector which will inject the mocks
+    injector = Guice.createInjector(Modules.override(new InMemoryDefaultTestModule()).with(new MockModule()));
+
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    helper = injector.getInstance(OrmTestHelper.class);
+
+    amc = injector.getInstance(AmbariManagementController.class);
+
+    Field field = AmbariServer.class.getDeclaredField("clusterController");
+    field.setAccessible(true);
+    field.set(null, amc);
+
+    stackDAO = injector.getInstance(StackDAO.class);
+    upgradeDao = injector.getInstance(UpgradeDAO.class);
+    repoVersionDao = injector.getInstance(RepositoryVersionDAO.class);
+
+    AmbariEventPublisher publisher = createNiceMock(AmbariEventPublisher.class);
+    replay(publisher);
+    ViewRegistry.initInstance(new ViewRegistry(publisher));
+
+    StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");
+
+    RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity();
+    repoVersionEntity.setDisplayName("For Stack Version 2.2.0");
+    repoVersionEntity.setOperatingSystems("");
+    repoVersionEntity.setStack(stackEntity);
+    repoVersionEntity.setUpgradePackage("upgrade_test");
+    repoVersionEntity.setVersion("2.2.0.0");
+    repoVersionDao.create(repoVersionEntity);
+
+    repoVersionEntity = new RepositoryVersionEntity();
+    repoVersionEntity.setDisplayName("For Stack Version 2.2.4.2");
+    repoVersionEntity.setOperatingSystems("");
+    repoVersionEntity.setStack(stackEntity);
+    repoVersionEntity.setUpgradePackage("upgrade_test");
+    repoVersionEntity.setVersion("2.2.4.2");
+    repoVersionDao.create(repoVersionEntity);
+
+    clusters = injector.getInstance(Clusters.class);
+
+    StackId stackId = new StackId("HDP-2.2.0");
+    clusters.addCluster("c1", stackId);
+    Cluster cluster = clusters.getCluster("c1");
+
+    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin", RepositoryVersionState.UPGRADING);
+    cluster.transitionClusterVersion(stackId, stackId.getStackVersion(), RepositoryVersionState.CURRENT);
+
+    clusters.addHost("h1");
+    Host host = clusters.getHost("h1");
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6.3");
+    host.setHostAttributes(hostAttributes);
+    host.setState(HostState.HEALTHY);
+    host.persist();
+
+    clusters.mapHostToCluster("h1", "c1");
+
+    // add a single HIVE server
+    Service service = cluster.addService("HIVE");
+    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    service.persist();
+
+    ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
+    ServiceComponentHost sch = component.addServiceComponentHost("h1");
+    sch.setVersion("2.2.0.0");
+
+    component = service.addServiceComponent("HIVE_CLIENT");
+    sch = component.addServiceComponentHost("h1");
+    sch.setVersion("2.2.0.0");
+    TopologyManager topologyManager = new TopologyManager();
+    StageUtils.setTopologyManager(topologyManager);
+    ActionManager.setTopologyManager(topologyManager);
+  }
+
+  @After
+  public void after() {
+    injector.getInstance(PersistService.class).stop();
+    injector = null;
+  }
+
+  /**
+   * Tests upgrades from HDP-2.2.x to HDP-2.2.y
+   * 
+   * @throws Exception
+   */
+  @SuppressWarnings("serial")
+  @Test
+  public void testCreateIntraStackUpgrade() throws Exception {
+    // We want to use the HDP-2.2 'upgrade_test' catalog
+    // Create HDP-2.2 stack
+
+    Cluster cluster = clusters.getCluster("c1");
+    StackId oldStack = cluster.getDesiredStackVersion();
+
+    for (Service s : cluster.getServices().values()) {
+      assertEquals(oldStack, s.getDesiredStackVersion());
+
+      for (ServiceComponent sc : s.getServiceComponents().values()) {
+        assertEquals(oldStack, sc.getDesiredStackVersion());
+
+        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
+          assertEquals(oldStack, sch.getDesiredStackVersion());
+        }
+      }
+    }
+
+    Config config = new ConfigImpl("hive-site");
+    config.setProperties(configTagVersion1Properties);
+    config.setTag(configTagVersion1);
+
+    cluster.addConfig(config);
+    cluster.addDesiredConfig("admin", Collections.singleton(config));
+
+    Map<String, Object> requestProps = new HashMap<String, Object>();
+    requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.4.2");
+
+    ResourceProvider upgradeResourceProvider = createProvider(amc);
+
+    Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null);
+    upgradeResourceProvider.createResources(request);
+
+    List<UpgradeEntity> upgrades = upgradeDao.findUpgrades(cluster.getClusterId());
+    assertEquals(1, upgrades.size());
+
+    UpgradeEntity upgrade = upgrades.get(0);
+    assertEquals(3, upgrade.getUpgradeGroups().size());
+
+    UpgradeGroupEntity group = upgrade.getUpgradeGroups().get(2);
+    assertEquals(2, group.getItems().size());
+
+    group = upgrade.getUpgradeGroups().get(0);
+    assertEquals(2, group.getItems().size());
+    UpgradeItemEntity item = group.getItems().get(1);
+    assertEquals("Value is set for the source stack upgrade pack", "Goo", item.getText());
+
+    assertTrue(cluster.getDesiredConfigs().containsKey("hive-site"));
+
+    StackId newStack = cluster.getDesiredStackVersion();
+
+    assertTrue(oldStack.equals(newStack));
+
+    for (Service s : cluster.getServices().values()) {
+      assertEquals(newStack, s.getDesiredStackVersion());
+
+      for (ServiceComponent sc : s.getServiceComponents().values()) {
+        assertEquals(newStack, sc.getDesiredStackVersion());
+
+        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
+          assertEquals(newStack, sch.getDesiredStackVersion());
+        }
+      }
+    }
+
+    // Hive service checks have generated the ExecutionCommands by now.
+    // Change the new desired config tag and verify execution command picks up new tag
+    assertEquals(configTagVersion1, cluster.getDesiredConfigByType("hive-site").getTag());
+    final Config newConfig = new ConfigImpl("hive-site");
+    newConfig.setProperties(new HashMap<String, String>() {
+      {
+        put("hive.server2.thrift.port", "10010");
+      }
+    });
+    newConfig.setTag(configTagVersion2);
+    Set<Config> desiredConfigs = new HashSet<Config>() {
+      {
+        add(newConfig);
+      }
+    };
+    cluster.addConfig(newConfig);
+    cluster.addDesiredConfig("admin", desiredConfigs);
+    assertEquals(configTagVersion2, cluster.getDesiredConfigByType("hive-site").getTag());
+    Gson gson = new Gson();
+    List<ExecutionCommandEntity> currentExecutionCommands = injector.getInstance(ExecutionCommandDAO.class).findAll();
+    for (ExecutionCommandEntity ece : currentExecutionCommands) {
+      String executionCommandJson = new String(ece.getCommand());
+      Map<String, Object> commandMap = gson.<Map<String, Object>> fromJson(executionCommandJson, Map.class);
+      if ("SERVICE_CHECK".equals(commandMap.get("roleCommand")) || "RESTART".equals(commandMap.get("roleCommand"))) {
+        assertTrue(commandMap.containsKey(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION));
+        Object object = commandMap.get(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION);
+        assertTrue(object instanceof List);
+        @SuppressWarnings("unchecked")
+        List<String> tags = (List<String>) commandMap.get(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION);
+        assertEquals(1, tags.size());
+        assertEquals("*", tags.get(0));
+
+        // Verify latest tag is being used.
+        ExecutionCommandWrapper executionCommandWrapper = new ExecutionCommandWrapper(executionCommandJson);
+        ExecutionCommand executionCommand = executionCommandWrapper.getExecutionCommand();
+        Map<String, Map<String, String>> configurationTags = executionCommand.getConfigurationTags();
+        assertEquals(configTagVersion2, configurationTags.get("hive-site").get("tag"));
+      }
+    }
+  }
+
+  /**
+   * @param amc
+   * @return the provider
+   */
+  private UpgradeResourceProvider createProvider(AmbariManagementController amc) {
+    return new UpgradeResourceProvider(amc);
+  }
+
+  /**
+   * Guice module that overrides the default test bindings with the mocks
+   * created in {@code before()}.
+   */
+  private class MockModule implements Module {
+    /**
+     * Binds the mocked {@link ConfigHelper} so the code under test uses it.
+     */
+    @Override
+    public void configure(Binder binder) {
+      binder.bind(ConfigHelper.class).toInstance(configHelper);
+    }
+  }
+}