
AMBARI-6762. Include configuration in exported blueprints.

John Speidel 11 years ago
parent
commit
8d464c2b28

+ 360 - 40
ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java

@@ -19,6 +19,7 @@
 package org.apache.ambari.server.api.query.render;
 
 import org.apache.ambari.server.api.query.QueryInfo;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.Request;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
@@ -26,15 +27,25 @@ import org.apache.ambari.server.api.services.ResultPostProcessor;
 import org.apache.ambari.server.api.services.ResultPostProcessorImpl;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.api.util.TreeNodeImpl;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
+import org.apache.ambari.server.controller.internal.HostGroup;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.HostConfig;
+import org.apache.ambari.server.state.PropertyInfo;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -44,6 +55,18 @@ import java.util.Set;
  */
 public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
 
+  /**
+   * Management Controller used to get stack information.
+   */
+  private AmbariManagementController controller = AmbariServer.getController();
+
+  /**
+   * Map of configuration type to configuration properties which require user input.
+   * These properties will be stripped from the exported blueprint.
+   */
+  private Map<String, Collection<String>> propertiesToStrip = new HashMap<String, Collection<String>>();
+
+
   // ----- Renderer ----------------------------------------------------------
 
   @Override
@@ -55,6 +78,17 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
         null, properties, queryProperties.getName());
 
     copyPropertiesToResult(queryProperties, resultTree);
+
+    String configType = Resource.Type.Configuration.name();
+    if (resultTree.getChild(configType) == null) {
+      resultTree.addChild(new HashSet<String>(), configType);
+    }
+
+    String serviceType = Resource.Type.Service.name();
+    if (resultTree.getChild(serviceType) == null) {
+      resultTree.addChild(new HashSet<String>(), serviceType);
+    }
+
     String hostType = Resource.Type.Host.name();
     String hostComponentType = Resource.Type.HostComponent.name();
     TreeNode<Set<String>> hostComponentNode = resultTree.getChild(
@@ -67,6 +101,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
       }
       hostComponentNode = hostNode.addChild(new HashSet<String>(), hostComponentType);
     }
+    resultTree.getChild(configType).getObject().add("properties");
     hostComponentNode.getObject().add("HostRoles/component_name");
 
     return resultTree;
@@ -106,50 +141,141 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
   private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
     Resource clusterResource = clusterNode.getObject();
     Resource blueprintResource = new ResourceImpl(Resource.Type.Cluster);
-    String clusterName = (String) clusterResource.getPropertyValue(
-        PropertyHelper.getPropertyId("Clusters", "cluster_name"));
-    //todo: deal with name collision?
-    String blueprintName = "blueprint-" + clusterName;
+
     String[] stackTokens = ((String) clusterResource.getPropertyValue(
             PropertyHelper.getPropertyId("Clusters", "version"))).split("-");
 
-    blueprintResource.setProperty("Blueprints/blueprint_name", blueprintName);
     blueprintResource.setProperty("Blueprints/stack_name", stackTokens[0]);
     blueprintResource.setProperty("Blueprints/stack_version", stackTokens[1]);
-    blueprintResource.setProperty(
-        "host_groups", processHostGroups(clusterNode.getChild("hosts")));
+
+    Collection<HostGroupImpl> hostGroups =  processHostGroups(clusterNode.getChild("hosts"));
+
+    List<Map<String, Object>> groupList = formatGroupsAsList(hostGroups);
+    blueprintResource.setProperty("host_groups", groupList);
+
+    determinePropertiesToStrip(clusterNode.getChild("services"), stackTokens[0], stackTokens[1]);
+
+    blueprintResource.setProperty("configurations", processConfigurations(clusterNode, hostGroups));
 
     return blueprintResource;
   }
 
   /**
-   * Process host group information for all hosts.
+   * Determine which configuration properties need to be stripped from the configuration prior to exporting.
+   * Stripped properties are any properties which are marked as required in the stack definition.  For example,
+   * all passwords are required properties and are therefore not exported.
+   *
+   * @param servicesNode  services node
+   * @param stackName     stack name
+   * @param stackVersion  stack version
+   */
+  private void determinePropertiesToStrip(TreeNode<Resource> servicesNode, String stackName, String stackVersion) {
+    AmbariMetaInfo stackInfo = getController().getAmbariMetaInfo();
+    for (TreeNode<Resource> service : servicesNode.getChildren()) {
+      String name = (String) service.getObject().getPropertyValue("ServiceInfo/service_name");
+      Map<String, PropertyInfo> requiredProperties = stackInfo.getRequiredProperties(stackName, stackVersion, name);
+      for (Map.Entry<String, PropertyInfo> entry : requiredProperties.entrySet()) {
+        String propertyName = entry.getKey();
+        PropertyInfo propertyInfo = entry.getValue();
+        String configCategory = propertyInfo.getFilename();
+        if (configCategory.endsWith(".xml")) {
+          configCategory = configCategory.substring(0, configCategory.indexOf(".xml"));
+        }
+        Collection<String> categoryProperties = propertiesToStrip.get(configCategory);
+        if (categoryProperties == null) {
+          categoryProperties = new ArrayList<String>();
+          propertiesToStrip.put(configCategory, categoryProperties);
+        }
+        categoryProperties.add(propertyName);
+      }
+    }
+  }
+
+  /**
+   * Process cluster scoped configurations.
    *
-   * @param hostNode a host node
+   * @param clusterNode  cluster node
+   * @param hostGroups   all host groups
    *
-   * @return list of host group property maps, one element for each host group
+   * @return cluster configuration
    */
-  private List<Map<String, Object>> processHostGroups(TreeNode<Resource> hostNode) {
-    Map<HostGroup, HostGroup> mapHostGroups = new HashMap<HostGroup, HostGroup>();
+  private List<Map<String, Map<String, String>>>  processConfigurations(TreeNode<Resource> clusterNode,
+                                                                        Collection<HostGroupImpl> hostGroups) {
+
+    List<Map<String, Map<String, String>>> configList = new ArrayList<Map<String, Map<String, String>>>();
+
+    Map<String, Object> desiredConfigMap = clusterNode.getObject().getPropertiesMap().get("Clusters/desired_configs");
+    TreeNode<Resource> configNode = clusterNode.getChild("configurations");
+    for (TreeNode<Resource> config : configNode.getChildren()) {
+      Configuration configuration = new Configuration(config);
+      DesiredConfig desiredConfig = (DesiredConfig) desiredConfigMap.get(configuration.getType());
+      if (desiredConfig != null && desiredConfig.getTag().equals(configuration.getTag())) {
+        Map<String, Map<String, String>> properties = Collections.singletonMap(
+            configuration.getType(), configuration.getProperties());
+
+        BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+        properties = updater.doUpdateForBlueprintExport(hostGroups);
+        configList.add(properties);
+      }
+    }
+    return configList;
+  }
+
+  /**
+   * Process cluster host groups.
+   *
+   * @param hostNode  host node
+   *
+   * @return collection of host groups
+   */
+  private Collection<HostGroupImpl> processHostGroups(TreeNode<Resource> hostNode) {
+    Map<HostGroupImpl, HostGroupImpl> mapHostGroups = new HashMap<HostGroupImpl, HostGroupImpl>();
+    int count = 1;
     for (TreeNode<Resource> host : hostNode.getChildren()) {
-      HostGroup group = HostGroup.parse(host);
+      HostGroupImpl group = new HostGroupImpl(host);
+      String hostName = (String) host.getObject().getPropertyValue(
+          PropertyHelper.getPropertyId("Hosts", "host_name"));
+
       if (mapHostGroups.containsKey(group)) {
-        mapHostGroups.get(group).incrementCardinality();
+        HostGroupImpl hostGroup = mapHostGroups.get(group);
+        hostGroup.incrementCardinality();
+        hostGroup.addHost(hostName);
       } else {
         mapHostGroups.put(group, group);
+        group.setName("host_group_" + count++);
+        group.addHost(hostName);
       }
     }
+    return mapHostGroups.values();
+  }
 
-    int count = 1;
+
+  /**
+   * Process host group information for all hosts.
+   *
+   * @param hostGroups all host groups
+   *
+   * @return list of host group property maps, one element for each host group
+   */
+  private List<Map<String, Object>> formatGroupsAsList(Collection<HostGroupImpl> hostGroups) {
     List<Map<String, Object>> listHostGroups = new ArrayList<Map<String, Object>>();
-    for (HostGroup group : mapHostGroups.values()) {
-      String groupName = "host_group_" + count++;
+    for (HostGroupImpl group : hostGroups) {
       Map<String, Object> mapGroupProperties = new HashMap<String, Object>();
       listHostGroups.add(mapGroupProperties);
 
-      mapGroupProperties.put("name", groupName);
+      mapGroupProperties.put("name", group.getName());
       mapGroupProperties.put("cardinality", String.valueOf(group.getCardinality()));
       mapGroupProperties.put("components", processHostGroupComponents(group));
+      List<Map<String, Map<String, String>>> hostConfigurations = new ArrayList<Map<String, Map<String, String>>>();
+      for (Configuration configuration : group.getConfigurations()) {
+        Map<String, Map<String, String>> propertyMap = Collections.singletonMap(
+            configuration.getType(), configuration.properties);
+        BlueprintConfigurationProcessor configurationProcessor = new BlueprintConfigurationProcessor(propertyMap);
+        Map<String, Map<String, String>> updatedProps = configurationProcessor.doUpdateForBlueprintExport(hostGroups);
+        hostConfigurations.add(updatedProps);
+
+      }
+      mapGroupProperties.put("configurations", hostConfigurations);
     }
     return listHostGroups;
   }
@@ -161,7 +287,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    *
    * @return list of component names for the host
    */
-  private List<Map<String, String>> processHostGroupComponents(HostGroup group) {
+  private List<Map<String, String>> processHostGroupComponents(HostGroupImpl group) {
     List<Map<String, String>> listHostGroupComponents = new ArrayList<Map<String, String>>();
     for (String component : group.getComponents()) {
       Map<String, String> mapComponentProperties = new HashMap<String, String>();
@@ -183,16 +309,37 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
     return isCollection != null && isCollection.equals("true");
   }
 
+  /**
+   * Get management controller instance.
+   *
+   * @return  management controller
+   */
+  protected AmbariManagementController getController() {
+    return controller;
+  }
+
   // ----- Host Group inner class --------------------------------------------
 
   /**
    * Host Group representation.
    */
-  private static class HostGroup {
+  private class HostGroupImpl implements HostGroup {
+
+    /**
+     * Host Group name.
+     *
+     */
+    private String name;
+
     /**
      * Associated components.
      */
-    private Set<String> m_components = new HashSet<String>();
+    private Set<String> components = new HashSet<String>();
+
+    /**
+     * Host group scoped configurations.
+     */
+    private Collection<Configuration> configurations = new HashSet<Configuration>();
 
     /**
      * Number of instances.
@@ -200,35 +347,107 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
     private int m_cardinality = 1;
 
     /**
-     * Factory method for obtaining a host group instance.
-     * Parses a host tree node for host related information.
-     *
-     * @param host  host tree node
-     *
-     * @return a new HostGroup instance
+     * Collection of associated hosts.
      */
-    public static HostGroup parse(TreeNode<Resource> host) {
-      HostGroup group = new HostGroup();
+    private Collection<String> hosts = new HashSet<String>();
 
+    /**
+     * Constructor.
+     *
+     * @param host  host node
+     */
+    public HostGroupImpl(TreeNode<Resource> host) {
       TreeNode<Resource> components = host.getChild("host_components");
       for (TreeNode<Resource> component : components.getChildren()) {
-        group.getComponents().add((String) component.getObject().getPropertyValue(
+        getComponents().add((String) component.getObject().getPropertyValue(
             "HostRoles/component_name"));
       }
-
-      group.addAmbariComponentIfLocalhost((String) host.getObject().getPropertyValue(
+      addAmbariComponentIfLocalhost((String) host.getObject().getPropertyValue(
           PropertyHelper.getPropertyId("Hosts", "host_name")));
 
-      return group;
+      processGroupConfiguration(host);
     }
 
-    /**                                                           `
-     * Obtain associated components.
+    /**
+     * Process host group configuration.
      *
-     * @return set of associated components
+     * @param host  host node
      */
+    private void processGroupConfiguration(TreeNode<Resource> host) {
+      Map<String, Object> desiredConfigMap = host.getObject().getPropertiesMap().get("Hosts/desired_configs");
+      if (desiredConfigMap != null) {
+        for (Map.Entry<String, Object> entry : desiredConfigMap.entrySet()) {
+          String type = entry.getKey();
+          HostConfig hostConfig = (HostConfig) entry.getValue();
+          Map<Long, String> overrides = hostConfig.getConfigGroupOverrides();
+
+          if (overrides != null && ! overrides.isEmpty()) {
+            Long version = Collections.max(overrides.keySet());
+            String tag = overrides.get(version);
+            TreeNode<Resource> clusterNode = host.getParent().getParent();
+            TreeNode<Resource> configNode = clusterNode.getChild("configurations");
+            for (TreeNode<Resource> config : configNode.getChildren()) {
+              Configuration configuration = new Configuration(config);
+              if (type.equals(configuration.getType()) && tag.equals(configuration.getTag())) {
+                getConfigurations().add(configuration);
+                break;
+              }
+            }
+          }
+        }
+      }
+    }
+
+    @Override
+    public String getName() {
+      return name;
+    }
+
+    @Override
     public Set<String> getComponents() {
-      return m_components;
+      return components;
+    }
+
+    @Override
+    public Collection<String> getHostInfo() {
+      return hosts;
+    }
+
+    @Override
+    public Map<String, Map<String, String>> getConfigurationProperties() {
+      Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+      for (Configuration configuration : configurations) {
+        properties.put(configuration.getType(), configuration.getProperties());
+      }
+
+      return properties;
+    }
+
+    /**
+     * Set the name.
+     *
+     * @param  name name of host group
+     */
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    /**
+     * Add a host.
+     *
+     * @param host  host to add
+     */
+    public void addHost(String host) {
+      hosts.add(host);
+    }
+
+    /**
+     * Obtain associated host group scoped configurations.
+     *
+     * @return collection of host group scoped configurations
+     */
+    public Collection<Configuration> getConfigurations() {
+      return configurations;
     }
 
     /**
@@ -273,14 +492,115 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
       if (this == o) return true;
       if (o == null || getClass() != o.getClass()) return false;
 
-      HostGroup hostGroup = (HostGroup) o;
+      HostGroupImpl hostGroup = (HostGroupImpl) o;
 
-      return m_components.equals(hostGroup.m_components);
+      return components.equals(hostGroup.components) &&
+          configurations.equals(hostGroup.configurations);
     }
 
     @Override
     public int hashCode() {
-      return m_components.hashCode();
+      int result = components.hashCode();
+      result = 31 * result + configurations.hashCode();
+      return result;
+    }
+  }
+
+  /**
+   * Encapsulates a configuration.
+   */
+  private class Configuration {
+    /**
+     * Configuration type such as hdfs-site.
+     */
+    private String type;
+
+    /**
+     * Configuration tag.
+     */
+    private String tag;
+
+    /**
+     * Properties of the configuration.
+     */
+    private Map<String, String> properties = new HashMap<String, String>();
+
+    /**
+     * Constructor.
+     *
+     * @param configNode  configuration node
+     */
+    @SuppressWarnings("unchecked")
+    public Configuration(TreeNode<Resource> configNode) {
+      Resource configResource = configNode.getObject();
+      type = (String) configResource.getPropertyValue("type");
+      tag  = (String) configResource.getPropertyValue("tag");
+
+      // property map type is currently <String, Object>
+      properties = (Map) configNode.getObject().getPropertiesMap().get("properties");
+      stripRequiredProperties(properties);
+    }
+
+    /**
+     * Get configuration type.
+     *
+     * @return configuration type
+     */
+    public String getType() {
+      return type;
+    }
+
+    /**
+     * Get configuration tag.
+     *
+     * @return configuration tag
+     */
+    public String getTag() {
+      return tag;
+    }
+
+    /**
+     * Get configuration properties.
+     *
+     * @return map of properties and values
+     */
+    public Map<String, String> getProperties() {
+      return properties;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      Configuration that = (Configuration) o;
+      return tag.equals(that.tag) && type.equals(that.type) && properties.equals(that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+      int result = type.hashCode();
+      result = 31 * result + tag.hashCode();
+      result = 31 * result + properties.hashCode();
+      return result;
+    }
+
+    /**
+     * Strip required properties from configuration.
+     *
+     * @param properties  property map
+     */
+    private void stripRequiredProperties(Map<String, String> properties) {
+      Iterator<Map.Entry<String, String>> iter = properties.entrySet().iterator();
+      while (iter.hasNext()) {
+        Map.Entry<String, String> entry = iter.next();
+        String property = entry.getKey();
+        String category = getType();
+        Collection<String> categoryProperties = propertiesToStrip.get(category);
+        if (categoryProperties != null && categoryProperties.contains(property)) {
+          iter.remove();
+        }
+      }
     }
   }
 

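A minimal, self-contained sketch of the export-time rewrite the renderer now delegates to BlueprintConfigurationProcessor. The HostGroup implementation, group name, and host name below are illustrative assumptions (a one-group cluster whose NAMENODE runs on c6401.ambari.apache.org), and the HostGroup interface is assumed to declare only the four methods overridden above:

    import java.util.*;
    import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
    import org.apache.ambari.server.controller.internal.HostGroup;

    public class ExportRewriteSketch {
      public static void main(String[] args) {
        // Hypothetical host group: "host_group_1" containing the NAMENODE host.
        HostGroup group = new HostGroup() {
          public String getName()                   { return "host_group_1"; }
          public Collection<String> getComponents() { return Collections.singleton("NAMENODE"); }
          public Collection<String> getHostInfo()   { return Collections.singleton("c6401.ambari.apache.org"); }
          public Map<String, Map<String, String>> getConfigurationProperties() {
            return Collections.emptyMap();
          }
        };

        Map<String, Map<String, String>> props = new HashMap<String, Map<String, String>>();
        props.put("core-site", new HashMap<String, String>(
            Collections.singletonMap("fs.defaultFS", "hdfs://c6401.ambari.apache.org:8020")));

        // Concrete host names are replaced by %HOSTGROUP::<name>% tokens, the same
        // tokens HOSTGROUP_REGEX resolves back to hosts on cluster create.
        new BlueprintConfigurationProcessor(props).doUpdateForBlueprintExport(
            Collections.singleton(group));

        System.out.println(props.get("core-site").get("fs.defaultFS"));
        // prints: hdfs://%HOSTGROUP::host_group_1%:8020
      }
    }
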
+ 8 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -1080,16 +1080,19 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
     else {
+      boolean includeProps = request.includeProperties();
       if (null != request.getType()) {
         Map<String, Config> configs = cluster.getConfigsByType(
             request.getType());
 
         if (null != configs) {
           for (Entry<String, Config> entry : configs.entrySet()) {
+            Config config = entry.getValue();
             ConfigurationResponse response = new ConfigurationResponse(
                 cluster.getClusterName(), request.getType(),
-                entry.getValue().getTag(), entry.getValue().getVersion(), new HashMap<String, String>(),
-                new HashMap<String, Map<String,String>>());
+                config.getTag(), entry.getValue().getVersion(),
+                includeProps ? config.getProperties() : new HashMap<String, String>(),
+                includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>());
             responses.add(response);
           }
         }
@@ -1099,8 +1102,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
         for (Config config : all) {
           ConfigurationResponse response = new ConfigurationResponse(
-             cluster.getClusterName(), config.getType(), config.getTag(), config.getVersion(),
-             new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
+              cluster.getClusterName(), config.getType(), config.getTag(), config.getVersion(),
+              includeProps ? config.getProperties() : new HashMap<String, String>(),
+              includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>());
 
           responses.add(response);
         }

+ 20 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java

@@ -34,6 +34,7 @@ public class ConfigurationRequest {
   private Map<String, String> configs;
   private boolean selected = true;
   private Map<String, Map<String, String>> configsAttributes;
+  private boolean includeProperties;
 
   public ConfigurationRequest() {
     configs = new HashMap<String, String>();
@@ -52,6 +53,7 @@ public class ConfigurationRequest {
     this.tag = tag;
     this.configs = configs;
     this.configsAttributes = configsAttributes;
+    this.includeProperties = (type != null && tag != null);
   }
 
   /**
@@ -127,6 +129,24 @@ public class ConfigurationRequest {
     return selected;
   }
 
+  /**
+   * Set whether properties should be included.
+   *
+   * @param includeProperties whether properties should be included
+   */
+  public void setIncludeProperties(boolean includeProperties) {
+    this.includeProperties = includeProperties;
+  }
+
+  /**
+   * Determine whether properties should be included.
+   *
+   * @return  true if properties should be included; false otherwise
+   */
+  public boolean includeProperties()  {
+    return this.includeProperties;
+  }
+
   /**
    * @return Attributes of configs
    */

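A short usage sketch for the new flag, assuming the existing (clusterName, type, tag, configs, configsAttributes) constructor whose tail is shown above; the cluster name and tag values are illustrative:

    import org.apache.ambari.server.controller.ConfigurationRequest;

    public class IncludePropertiesSketch {
      public static void main(String[] args) {
        // Both type and tag supplied: includeProperties defaults to true, so the
        // controller fills in the config's properties and attributes in its response.
        ConfigurationRequest single = new ConfigurationRequest(
            "c1", "core-site", "version1", null, null);
        System.out.println(single.includeProperties());   // true

        // Listing all configs: properties are omitted unless explicitly requested.
        ConfigurationRequest all = new ConfigurationRequest("c1", null, null, null, null);
        System.out.println(all.includeProperties());      // false
        all.setIncludeProperties(true);                    // opt in, e.g. for blueprint export
      }
    }
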
+ 41 - 41
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java

@@ -84,9 +84,9 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    *
    * @return collection of host groups which contain the specified component
    */
-  protected Collection<HostGroup> getHostGroupsForComponent(String component, Collection<HostGroup> hostGroups) {
-    Collection<HostGroup> resultGroups = new HashSet<HostGroup>();
-    for (HostGroup group : hostGroups ) {
+  protected Collection<HostGroupImpl> getHostGroupsForComponent(String component, Collection<HostGroupImpl> hostGroups) {
+    Collection<HostGroupImpl> resultGroups = new HashSet<HostGroupImpl>();
+    for (HostGroupImpl group : hostGroups ) {
       if (group.getComponents().contains(component)) {
         resultGroups.add(group);
       }
@@ -102,11 +102,11 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    *
    * @return map of host group name to host group
    */
-  protected Map<String, HostGroup> parseBlueprintHostGroups(BlueprintEntity blueprint, Stack stack) {
-    Map<String, HostGroup> mapHostGroups = new HashMap<String, HostGroup>();
+  protected Map<String, HostGroupImpl> parseBlueprintHostGroups(BlueprintEntity blueprint, Stack stack) {
+    Map<String, HostGroupImpl> mapHostGroups = new HashMap<String, HostGroupImpl>();
 
     for (HostGroupEntity hostGroup : blueprint.getHostGroups()) {
-      mapHostGroups.put(hostGroup.getName(), new HostGroup(hostGroup, stack));
+      mapHostGroups.put(hostGroup.getName(), new HostGroupImpl(hostGroup, stack));
     }
     return mapHostGroups;
   }
@@ -149,14 +149,14 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    */
   protected BlueprintEntity validateTopology(BlueprintEntity blueprint) throws AmbariException {
     Stack stack = new Stack(blueprint.getStackName(), blueprint.getStackVersion());
-    Map<String, HostGroup> hostGroupMap = parseBlueprintHostGroups(blueprint, stack);
-    Collection<HostGroup> hostGroups = hostGroupMap.values();
+    Map<String, HostGroupImpl> hostGroupMap = parseBlueprintHostGroups(blueprint, stack);
+    Collection<HostGroupImpl> hostGroups = hostGroupMap.values();
     Map<String, Map<String, String>> clusterConfig = processBlueprintConfigurations(blueprint, null);
     Map<String, Map<String, Collection<DependencyInfo>>> missingDependencies =
         new HashMap<String, Map<String, Collection<DependencyInfo>>>();
 
     Collection<String> services = getTopologyServices(hostGroups);
-    for (HostGroup group : hostGroups) {
+    for (HostGroupImpl group : hostGroups) {
       Map<String, Collection<DependencyInfo>> missingGroupDependencies =
           group.validateTopology(hostGroups, services, clusterConfig);
       if (! missingGroupDependencies.isEmpty()) {
@@ -311,9 +311,9 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    *
    * @return collections of all services provided by topology
    */
-  protected Collection<String> getTopologyServices(Collection<HostGroup> hostGroups) {
+  protected Collection<String> getTopologyServices(Collection<HostGroupImpl> hostGroups) {
     Collection<String> services = new HashSet<String>();
-    for (HostGroup group : hostGroups) {
+    for (HostGroupImpl group : hostGroups) {
       services.addAll(group.getServices());
     }
     return services;
@@ -359,7 +359,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    * @return collection of missing component information
    */
   private Collection<String> verifyComponentCardinalityCount(BlueprintEntity blueprint,
-                                                             Collection<HostGroup> hostGroups,
+                                                             Collection<HostGroupImpl> hostGroups,
                                                              String component,
                                                              Cardinality cardinality,
                                                              AutoDeployInfo autoDeploy,
@@ -374,11 +374,11 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
       if (! validated && autoDeploy != null && autoDeploy.isEnabled() && cardinality.supportsAutoDeploy()) {
         String coLocateName = autoDeploy.getCoLocate();
         if (coLocateName != null && ! coLocateName.isEmpty()) {
-          Collection<HostGroup> coLocateHostGroups = getHostGroupsForComponent(
+          Collection<HostGroupImpl> coLocateHostGroups = getHostGroupsForComponent(
               coLocateName.split("/")[1], hostGroups);
           if (! coLocateHostGroups.isEmpty()) {
             validated = true;
-            HostGroup group = coLocateHostGroups.iterator().next();
+            HostGroupImpl group = coLocateHostGroups.iterator().next();
             if (group.addComponent(component)) {
               addComponentToBlueprint(blueprint, group.getEntity().getName(), component);
             }
@@ -405,7 +405,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
    * @return collection of missing component information
    */
   private Collection<String> verifyComponentInAllHostGroups(BlueprintEntity blueprint,
-                                                            Collection<HostGroup> hostGroups,
+                                                            Collection<HostGroupImpl> hostGroups,
                                                             String component,
                                                             AutoDeployInfo autoDeploy) {
 
@@ -413,7 +413,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
     int actualCount = getHostGroupsForComponent(component, hostGroups).size();
     if (actualCount != hostGroups.size()) {
       if (autoDeploy != null && autoDeploy.isEnabled()) {
-        for (HostGroup group : hostGroups) {
+        for (HostGroupImpl group : hostGroups) {
           if (group.addComponent(component)) {
             addComponentToBlueprint(blueprint, group.getEntity().getName(), component);
           }
@@ -846,7 +846,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
   /**
    * Host group representation.
    */
-  protected class HostGroup {
+  protected class HostGroupImpl implements HostGroup {
     /**
      * Host group entity
      */
@@ -885,13 +885,28 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
      * @param hostGroup  host group
      * @param stack      stack
      */
-    public HostGroup(HostGroupEntity hostGroup, Stack stack) {
+    public HostGroupImpl(HostGroupEntity hostGroup, Stack stack) {
       this.hostGroup = hostGroup;
       this.stack = stack;
       parseComponents();
       parseConfigurations();
     }
 
+    @Override
+    public String getName() {
+      return hostGroup.getName();
+    }
+
+    @Override
+    public Collection<String> getComponents() {
+      return this.components;
+    }
+
+    @Override
+    public Collection<String> getHostInfo() {
+      return this.hosts;
+    }
+
     /**
      * Associate a host with the host group.
      *
@@ -901,15 +916,6 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
       this.hosts.add(fqdn);
     }
 
-    /**
-     * Get associated host information.
-     *
-     * @return collection of hosts associated with the host group
-     */
-    public Collection<String> getHostInfo() {
-      return this.hosts;
-    }
-
     /**
      * Get the services which are deployed to this host group.
      *
@@ -919,16 +925,6 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
       return componentsForService.keySet();
     }
 
-    /**
-     * Get the components associated with the host group.
-     *
-     * @return  collection of component names for the host group
-     */
-    public Collection<String> getComponents() {
-      return this.components;
-    }
-
-
     /**
      * Add a component to the host group.
      *
@@ -969,7 +965,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
      *
      * @return map of configuration type to a map of properties
      */
-    public Map<String, Map<String, String>> getConfigurations() {
+    public Map<String, Map<String, String>> getConfigurationProperties() {
       return configurations;
     }
 
@@ -991,7 +987,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
      *
      * @return map of component to missing dependencies
      */
-    public Map<String, Collection<DependencyInfo>> validateTopology(Collection<HostGroup> hostGroups,
+    public Map<String, Collection<DependencyInfo>> validateTopology(Collection<HostGroupImpl> hostGroups,
                                                                     Collection<String> services,
                                                                     Map<String, Map<String, String>> clusterConfig) {
 
@@ -1059,8 +1055,12 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
           typeProperties = new HashMap<String, String>();
           configurations.put(type, typeProperties);
         }
-        configurations.put(type, jsonSerializer.<Map<String, String>>fromJson(
-            configEntity.getConfigData(), Map.class));
+        Map<String, String> propertyMap =  jsonSerializer.<Map<String, String>>fromJson(
+            configEntity.getConfigData(), Map.class);
+
+        if (propertyMap != null) {
+          typeProperties.putAll(propertyMap);
+        }
       }
     }
   }

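Both HostGroupImpl classes above implement the shared org.apache.ambari.server.controller.internal.HostGroup interface consumed by BlueprintConfigurationProcessor. The interface source is not shown in the hunks above; a plausible minimal form, inferred from the overridden methods, is:

    package org.apache.ambari.server.controller.internal;

    import java.util.Collection;
    import java.util.Map;

    /**
     * Host group abstraction shared by blueprint export and cluster provisioning
     * (sketch inferred from the @Override methods above, not the committed source).
     */
    public interface HostGroup {
      String getName();                                               // e.g. "host_group_1"
      Collection<String> getComponents();                             // components mapped to the group
      Collection<String> getHostInfo();                               // FQDNs of the group's hosts
      Map<String, Map<String, String>> getConfigurationProperties();  // config type -> (property -> value)
    }
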
+ 700 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -0,0 +1,700 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Updates configuration properties based on cluster topology.  This is done when exporting
+ * a blueprint and when a cluster is provisioned via a blueprint.
+ */
+public class BlueprintConfigurationProcessor {
+
+  /**
+   * Single host topology updaters
+   */
+  private static Map<String, Map<String, PropertyUpdater>> singleHostTopologyUpdaters =
+      new HashMap<String, Map<String, PropertyUpdater>>();
+
+  /**
+   * Multi host topology updaters
+   */
+  private static Map<String, Map<String, PropertyUpdater>> multiHostTopologyUpdaters =
+      new HashMap<String, Map<String, PropertyUpdater>>();
+
+  /**
+   * Database host topology updaters
+   */
+  private static Map<String, Map<String, PropertyUpdater>> dbHostTopologyUpdaters =
+      new HashMap<String, Map<String, PropertyUpdater>>();
+
+  /**
+   * Updaters for properties which need 'm' appended
+   */
+  private static Map<String, Map<String, PropertyUpdater>> mPropertyUpdaters =
+      new HashMap<String, Map<String, PropertyUpdater>>();
+
+  /**
+   * Collection of all updaters
+   */
+  private static Collection<Map<String, Map<String, PropertyUpdater>>> allUpdaters =
+      new ArrayList<Map<String, Map<String, PropertyUpdater>>>();
+
+  /**
+   * Compiled regex for hostgroup token.
+   */
+  private static Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+)%");
+
+  /**
+   * Compiled regex for hostgroup token with port information.
+   */
+  private static Pattern HOSTGROUP_PORT_REGEX = Pattern.compile("%HOSTGROUP::(\\w+|\\d+)%:?(\\d+)?");
+
+  /**
+   * Configuration properties to be updated
+   */
+  private Map<String, Map<String, String>> properties;
+
+
+  /**
+   * Constructor.
+   *
+   * @param properties  properties to update
+   */
+  public BlueprintConfigurationProcessor(Map<String, Map<String, String>> properties) {
+    this.properties = properties;
+  }
+
+  /**
+   * Update properties for cluster creation.  This involves updating topology related properties with
+   * concrete topology information.
+   *
+   * @param hostGroups  host groups of cluster to be deployed
+   *
+   * @return  updated properties
+   */
+  public Map<String, Map<String, String>> doUpdateForClusterCreate(Map<String, ? extends HostGroup> hostGroups) {
+    for (Map<String, Map<String, PropertyUpdater>> updaterMap : allUpdaters) {
+      for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
+        String type = entry.getKey();
+        for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) {
+          String propertyName = updaterEntry.getKey();
+          PropertyUpdater updater = updaterEntry.getValue();
+
+          Map<String, String> typeMap = properties.get(type);
+          if (typeMap != null && typeMap.containsKey(propertyName)) {
+            typeMap.put(propertyName, updater.updateForClusterCreate(
+                hostGroups, typeMap.get(propertyName), properties));
+          }
+        }
+      }
+    }
+    return properties;
+  }
+
+  /**
+   * Update properties for blueprint export.
+   * This involves converting concrete topology information to host groups.
+   *
+   * @param hostGroups  cluster host groups
+   *
+   * @return  updated properties
+   */
+  public Map<String, Map<String, String>> doUpdateForBlueprintExport(Collection<? extends HostGroup> hostGroups) {
+    doSingleHostExportUpdate(hostGroups, singleHostTopologyUpdaters);
+    doSingleHostExportUpdate(hostGroups, dbHostTopologyUpdaters);
+    doMultiHostExportUpdate(hostGroups, multiHostTopologyUpdaters);
+
+    return properties;
+  }
+
+  /**
+   * Update single host topology configuration properties for blueprint export.
+   *
+   * @param hostGroups  cluster host groups
+   * @param updaters    registered updaters
+   */
+  private void doSingleHostExportUpdate(Collection<? extends HostGroup> hostGroups,
+                                        Map<String, Map<String, PropertyUpdater>> updaters) {
+
+    for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaters.entrySet()) {
+      String type = entry.getKey();
+      for (String propertyName : entry.getValue().keySet()) {
+        boolean matchedHost = false;
+
+        Map<String, String> typeProperties = properties.get(type);
+        if (typeProperties != null && typeProperties.containsKey(propertyName)) {
+          String propValue = typeProperties.get(propertyName);
+          for (HostGroup group : hostGroups) {
+            Collection<String> hosts = group.getHostInfo();
+            for (String host : hosts) {
+              if (propValue.contains(host)) {    //todo: need to use regular expression to avoid matching a host which is a superset.  Can this be fixed???
+                matchedHost = true;
+                typeProperties.put(propertyName, propValue.replace(
+                    host, "%HOSTGROUP::" + group.getName() + "%"));
+                break;
+              }
+            }
+            if (matchedHost) {
+              break;
+            }
+          }
+          if (! matchedHost) {
+            typeProperties.remove(propertyName);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Update multi host topology configuration properties for blueprint export.
+   *
+   * @param hostGroups  cluster host groups
+   * @param updaters    registered updaters
+   */
+  private void doMultiHostExportUpdate(Collection<? extends HostGroup> hostGroups,
+                                       Map<String, Map<String, PropertyUpdater>> updaters) {
+
+    for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaters.entrySet()) {
+      String type = entry.getKey();
+      for (String propertyName : entry.getValue().keySet()) {
+        Map<String, String> typeProperties = properties.get(type);
+        if (typeProperties != null && typeProperties.containsKey(propertyName)) {
+          String propValue = typeProperties.get(propertyName);
+          for (HostGroup group : hostGroups) {
+            Collection<String> hosts = group.getHostInfo();
+            for (String host : hosts) {
+              propValue = propValue.replaceAll(host + "\\b", "%HOSTGROUP::" + group.getName() + "%");
+            }
+          }
+          Collection<String> addedGroups = new HashSet<String>();
+          String[] toks = propValue.split(",");
+          boolean inBrackets = propValue.startsWith("[");
+
+          StringBuilder sb = new StringBuilder();
+          if (inBrackets) {
+            sb.append('[');
+          }
+          boolean firstTok = true;
+          for (String tok : toks) {
+            tok = tok.replaceAll("[\\[\\]]", "");
+
+            if (addedGroups.add(tok)) {
+              if (! firstTok) {
+                sb.append(',');
+              }
+              sb.append(tok);
+            }
+            firstTok = false;
+          }
+
+          if (inBrackets) {
+            sb.append(']');
+          }
+          typeProperties.put(propertyName, sb.toString());
+        }
+      }
+    }
+  }
+
+  /**
+   * Get host groups which contain a component.
+   *
+   * @param component   component name
+   * @param hostGroups  collection of host groups to check
+   *
+   * @return collection of host groups which contain the specified component
+   */
+  private static Collection<HostGroup> getHostGroupsForComponent(String component,
+                                                                 Collection<? extends HostGroup> hostGroups) {
+
+    Collection<HostGroup> resultGroups = new HashSet<HostGroup>();
+    for (HostGroup group : hostGroups ) {
+      if (group.getComponents().contains(component)) {
+        resultGroups.add(group);
+      }
+    }
+    return resultGroups;
+  }
+
+  /**
+   * Resolve host group topology tokens in a property value to the corresponding physical hosts.
+   *
+   * @param hostGroups  cluster host groups
+   * @param val         value to be converted
+   *
+   * @return collection of physical host names, each with its port appended when one was specified
+   */
+  private static Collection<String> getHostStrings(Map<String, ? extends HostGroup> hostGroups,
+                                                   String val) {
+
+    Collection<String> hosts = new HashSet<String>();
+    Matcher m = HOSTGROUP_PORT_REGEX.matcher(val);
+    while (m.find()) {
+      String groupName = m.group(1);
+      String port = m.group(2);
+
+
+      HostGroup hostGroup = hostGroups.get(groupName);
+      if (hostGroup == null) {
+        throw new IllegalArgumentException(
+            "Unable to match blueprint host group token to a host group: " + groupName);
+      }
+      for (String host : hostGroup.getHostInfo()) {
+        if (port != null) {
+          host += ":" + port;
+        }
+        hosts.add(host);
+      }
+    }
+    return hosts;
+  }
+
+  /**
+   * Provides functionality to update a property value.
+   */
+  public interface PropertyUpdater {
+    /**
+     * Update a property value.
+     *
+     *
+     * @param hostGroups  host groups
+     * @param origValue   original value of property
+     * @param properties  all properties
+     *
+     * @return new property value
+     */
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String origValue, Map<String, Map<String, String>> properties);
+  }
+
+  /**
+   * Topology based updater which replaces the original host name of a property with the name of the host
+   * which runs the associated (master) component in the new cluster.
+   */
+  private static class SingleHostTopologyUpdater implements PropertyUpdater {
+    /**
+     * Component name
+     */
+    private String component;
+
+    /**
+     * Constructor.
+     *
+     * @param component  component name associated with the property
+     */
+    public SingleHostTopologyUpdater(String component) {
+      this.component = component;
+    }
+
+    /**
+     * Update the property with the new host name which runs the associated component.
+     *
+     *
+     * @param hostGroups  host groups
+     * @param origValue   original value of property
+     * @param properties  all properties
+     *
+     * @return updated property value with old host name replaced by new host name
+     */
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String origValue,
+                                         Map<String, Map<String, String>> properties)  {
+
+      Matcher m = HOSTGROUP_REGEX.matcher(origValue);
+      if (m.find()) {
+        String hostGroupName = m.group(1);
+        HostGroup hostGroup = hostGroups.get(hostGroupName);
+        //todo: ensure > 0 hosts (is this necessary)
+        return origValue.replace(m.group(0), hostGroup.getHostInfo().iterator().next());
+      } else {
+        Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values());
+        if (matchingGroups.size() == 1) {
+          return origValue.replace("localhost", matchingGroups.iterator().next().getHostInfo().iterator().next());
+        } else {
+          throw new IllegalArgumentException("Unable to update configuration property with topology information. " +
+              "Component '" + this.component + "' is not mapped to any host group or is mapped to multiple groups.");
+        }
+      }
+    }
+  }
+
+  /**
+   * Topology based updater which replaces the original host name of a database property with the name of the host
+   * where the DB is deployed in the new cluster.  If an existing database is specified, the original property
+   * value is returned.
+   */
+  private static class DBTopologyUpdater extends SingleHostTopologyUpdater {
+    /**
+     * Config type (global, core-site, ...) of the property used to determine if the DB is external.
+     */
+    private final String configPropertyType;
+
+    /**
+     * Name of property which is used to determine if DB is new or existing (external).
+     */
+    private final String conditionalPropertyName;
+
+    /**
+     * Constructor.
+     *
+     * @param component                component used to obtain the host name if the DB is new
+     * @param conditionalPropertyType  config type of property used to determine if DB is external
+     * @param conditionalPropertyName  name of property which is used to determine if DB is external
+     */
+    private DBTopologyUpdater(String component, String conditionalPropertyType,
+                              String conditionalPropertyName) {
+      super(component);
+      this.configPropertyType = conditionalPropertyType;
+      this.conditionalPropertyName = conditionalPropertyName;
+    }
+
+    /**
+     * If database is a new managed database, update the property with the new host name which
+     * runs the associated component.  If the database is external (non-managed), return the
+     * original value.
+     *
+     *
+     * @param hostGroups  host groups
+     * @param origValue   original value of property
+     * @param properties  all properties
+     *
+     * @return updated property value with old host name replaced by new host name or original value
+     *         if the database is external
+     */
+    @Override
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String origValue, Map<String, Map<String, String>> properties) {
+
+      if (isDatabaseManaged(properties)) {
+        return super.updateForClusterCreate(hostGroups, origValue, properties);
+      } else {
+        return origValue;
+      }
+    }
+
+    /**
+     * Determine if database is managed, meaning that it is a component in the cluster topology.
+     *
+     * @return true if the DB is managed; false otherwise
+     */
+    private boolean isDatabaseManaged(Map<String, Map<String, String>> properties) {
+      // conditional property should always exist since it is required to be specified in the stack
+      return properties.get(configPropertyType).
+          get(conditionalPropertyName).startsWith("New");
+    }
+  }
+
+  /**
+   * Topology based updater which replaces original host names (possibly more than one) contained in a property
+   * value with the names of the hosts which run the associated component in the new cluster.
+   */
+  private static class MultipleHostTopologyUpdater implements PropertyUpdater {
+    /**
+     * Component name
+     */
+    private String component;
+
+    /**
+     * Separator for multiple property values
+     */
+    private Character separator = ',';
+
+    /**
+     * Constructor.
+     *
+     * @param component  component name associated with the property
+     */
+    public MultipleHostTopologyUpdater(String component) {
+      this.component = component;
+    }
+
+    /**
+     * Update all host names included in the original property value with new host names which run the associated
+     * component.
+     *
+     *
+     * @param hostGroups  host groups
+     * @param origValue   original value of property
+     * @param properties  all properties
+     *
+     * @return updated property value with old host names replaced by new host names
+     */
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String origValue,
+                                         Map<String, Map<String, String>> properties) {
+
+      Collection<String> hostStrings = getHostStrings(hostGroups, origValue);
+      if (hostStrings.isEmpty()) {
+        //default non-exported original value
+        String port = null;
+        if (origValue.contains(":")) {
+          //todo: currently assuming all hosts are using same port
+          port = origValue.substring(origValue.indexOf(":") + 1);
+        }
+        Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values());
+        for (HostGroup group : matchingGroups) {
+          for (String host : group.getHostInfo()) {
+            if (port != null) {
+              host += ":" + port;
+            }
+            hostStrings.add(host);
+          }
+        }
+      }
+
+      StringBuilder sb = new StringBuilder();
+      boolean firstHost = true;
+      for (String host : hostStrings) {
+        if (!firstHost) {
+          sb.append(separator);
+        } else {
+          firstHost = false;
+        }
+        sb.append(host);
+      }
+
+      return sb.toString();
+    }
+  }
+
+  /**
+   * Updater which appends "m" to the original property value.
+   * For example, "1024" would be updated to "1024m".
+   */
+  private static class MPropertyUpdater implements PropertyUpdater {
+    /**
+     * Append 'm' to the original property value if it doesn't already exist.
+     *
+     *
+     * @param hostGroups  host groups
+     * @param origValue   original value of property
+     * @param properties  all properties
+     *
+     * @return property with 'm' appended
+     */
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String origValue, Map<String,
+        Map<String, String>> properties) {
+
+      return origValue.endsWith("m") ? origValue : origValue + 'm';
+    }
+  }
+
+  /**
+   * Class to facilitate special formatting needs of property values.
+   */
+  private abstract static class AbstractPropertyValueDecorator implements PropertyUpdater {
+    PropertyUpdater propertyUpdater;
+
+    /**
+     * Constructor.
+     *
+     * @param propertyUpdater  wrapped updater
+     */
+    public AbstractPropertyValueDecorator(PropertyUpdater propertyUpdater) {
+      this.propertyUpdater = propertyUpdater;
+    }
+
+    /**
+     * Return decorated form of the updated input property value.
+     *
+     * @param hostGroupMap  map of host group name to HostGroup
+     * @param origValue     original value of property
+     * @param properties    all properties
+     *
+     * @return Formatted output string
+     */
+    @Override
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroupMap,
+                                         String origValue,
+                                         Map<String, Map<String, String>> properties) {
+
+      return doFormat(propertyUpdater.updateForClusterCreate(hostGroupMap, origValue, properties));
+    }
+
+    /**
+     * Transform input string to required output format.
+     *
+     * @param originalValue  original value of property
+     *
+     * @return formatted output string
+     */
+    public abstract String doFormat(String originalValue);
+  }
+
+  /**
+   * Return properties of the form ['value']
+   */
+  private static class YamlMultiValuePropertyDecorator extends AbstractPropertyValueDecorator {
+
+    public YamlMultiValuePropertyDecorator(PropertyUpdater propertyUpdater) {
+      super(propertyUpdater);
+    }
+
+    /**
+     * Format an input String of the form "str1,str2" as "['str1','str2']".
+     *
+     * @param origValue  input string
+     *
+     * @return formatted string
+     */
+    @Override
+    public String doFormat(String origValue) {
+      StringBuilder sb = new StringBuilder();
+      if (origValue != null) {
+        sb.append("[");
+        boolean isFirst = true;
+        for (String value : origValue.split(",")) {
+          if (!isFirst) {
+            sb.append(",");
+          } else {
+            isFirst = false;
+          }
+          sb.append("'");
+          sb.append(value);
+          sb.append("'");
+        }
+        sb.append("]");
+      }
+      return sb.toString();
+    }
+  }
+
+  /**
+   * Register updaters for configuration properties.
+   */
+  static {
+
+    allUpdaters.add(singleHostTopologyUpdaters);
+    allUpdaters.add(multiHostTopologyUpdaters);
+    allUpdaters.add(dbHostTopologyUpdaters);
+    allUpdaters.add(mPropertyUpdaters);
+
+    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> coreSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>();
+
+    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
+
+    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
+
+    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
+
+
+    singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
+    singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
+    singleHostTopologyUpdaters.put("core-site", coreSiteMap);
+    singleHostTopologyUpdaters.put("hbase-site", hbaseSiteMap);
+    singleHostTopologyUpdaters.put("yarn-site", yarnSiteMap);
+    singleHostTopologyUpdaters.put("hive-site", hiveSiteMap);
+    singleHostTopologyUpdaters.put("oozie-site", oozieSiteMap);
+    singleHostTopologyUpdaters.put("storm-site", stormSiteMap);
+
+    mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
+    mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
+    mPropertyUpdaters.put("mapred-env", mapredEnvMap);
+
+    multiHostTopologyUpdaters.put("webhcat-site", multiWebhcatSiteMap);
+    multiHostTopologyUpdaters.put("hbase-site", multiHbaseSiteMap);
+    multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap);
+
+    dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);
+
+    // NAMENODE
+    hdfsSiteMap.put("dfs.http.address", new SingleHostTopologyUpdater("NAMENODE"));
+    hdfsSiteMap.put("dfs.https.address", new SingleHostTopologyUpdater("NAMENODE"));
+    coreSiteMap.put("fs.default.name", new SingleHostTopologyUpdater("NAMENODE"));
+    hdfsSiteMap.put("dfs.namenode.http-address", new SingleHostTopologyUpdater("NAMENODE"));
+    hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE"));
+    coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE"));
+    hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
+
+    // SECONDARY_NAMENODE
+    hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
+    hdfsSiteMap.put("dfs.namenode.secondary.http-address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
+
+    // JOBTRACKER
+    mapredSiteMap.put("mapred.job.tracker", new SingleHostTopologyUpdater("JOBTRACKER"));
+    mapredSiteMap.put("mapred.job.tracker.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
+    mapredSiteMap.put("mapreduce.history.server.http.address", new SingleHostTopologyUpdater("JOBTRACKER"));
+
+
+    // HISTORY_SERVER
+    yarnSiteMap.put("yarn.log.server.url", new SingleHostTopologyUpdater("HISTORYSERVER"));
+    mapredSiteMap.put("mapreduce.jobhistory.webapp.address", new SingleHostTopologyUpdater("HISTORYSERVER"));
+    mapredSiteMap.put("mapreduce.jobhistory.address", new SingleHostTopologyUpdater("HISTORYSERVER"));
+
+    // RESOURCEMANAGER
+    yarnSiteMap.put("yarn.resourcemanager.hostname", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+    yarnSiteMap.put("yarn.resourcemanager.resource-tracker.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+    yarnSiteMap.put("yarn.resourcemanager.webapp.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+    yarnSiteMap.put("yarn.resourcemanager.scheduler.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+    yarnSiteMap.put("yarn.resourcemanager.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+    yarnSiteMap.put("yarn.resourcemanager.admin.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
+
+    // HIVE_SERVER
+    hiveSiteMap.put("hive.metastore.uris", new SingleHostTopologyUpdater("HIVE_SERVER"));
+    dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
+        new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
+
+    // OOZIE_SERVER
+    oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));
+
+    // ZOOKEEPER_SERVER
+    multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+
+    // STORM
+    stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
+    stormSiteMap.put("worker.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER"));
+    stormSiteMap.put("supervisor.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER"));
+    stormSiteMap.put("nimbus.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER"));
+    multiStormSiteMap.put("storm.zookeeper.servers",
+        new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")));
+
+    // Required due to AMBARI-4933. These no longer appear to be necessary because the stack now ships
+    // correct default values, but they are kept in case an existing blueprint still contains an old value.
+    hadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
+    hadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
+    hadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
+    hadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
+    mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater());
+    mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
+    mapredEnvMap.put("jtnode_heapsize", new MPropertyUpdater());
+    hbaseEnvMap.put("hbase_master_heapsize", new MPropertyUpdater());
+    hbaseEnvMap.put("hbase_regionserver_heapsize", new MPropertyUpdater());
+  }
+}

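With the updaters now registered once inside BlueprintConfigurationProcessor, provisioning code only has to hand the processor the cluster configuration and the host group topology. Below is a minimal sketch of the cluster-create direction; the wrapper class and method are illustrative and not part of this commit, and the host groups are assumed to be built the way the TestHostGroup instances in the unit tests further down build them.

    package org.apache.ambari.server.controller.internal;

    import java.util.HashMap;
    import java.util.Map;

    public class ClusterCreateUpdateSketch {

      // Rewrite stack defaults such as "localhost" with the host that runs the mapped component.
      static Map<String, Map<String, String>> updateForClusterCreate(Map<String, HostGroup> hostGroups) {
        Map<String, Map<String, String>> config = new HashMap<String, Map<String, String>>();
        Map<String, String> yarnSite = new HashMap<String, String>();
        yarnSite.put("yarn.resourcemanager.hostname", "localhost");   // stack default value
        config.put("yarn-site", yarnSite);

        // The SingleHostTopologyUpdater registered above for this property replaces
        // "localhost" with the host of the group that contains RESOURCEMANAGER.
        BlueprintConfigurationProcessor processor = new BlueprintConfigurationProcessor(config);
        return processor.doUpdateForClusterCreate(hostGroups);
      }
    }
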
+ 24 - 382
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java

@@ -49,7 +49,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.configuration.Configuration;
 
 /**
  * Resource provider for cluster resources.
@@ -72,12 +71,6 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{CLUSTER_ID_PROPERTY_ID}));
 
-   /**
-   * Maps properties to updaters which update the property when provisioning a cluster via a blueprint
-   */
-  private Map<String, PropertyUpdater> propertyUpdaters =
-      new HashMap<String, PropertyUpdater>();
-
   /**
    * Maps configuration type (string) to associated properties
    */
@@ -104,7 +97,6 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
                           AmbariManagementController managementController) {
 
     super(propertyIds, keyPropertyIds, managementController);
-    registerPropertyUpdaters();
   }
 
   /**
@@ -377,7 +369,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
     BlueprintEntity blueprint = getExistingBlueprint(blueprintName);
     Stack stack = parseStack(blueprint);
 
-    Map<String, HostGroup> blueprintHostGroups = parseBlueprintHostGroups(blueprint, stack);
+    Map<String, HostGroupImpl> blueprintHostGroups = parseBlueprintHostGroups(blueprint, stack);
     applyRequestInfoToHostGroups(properties, blueprintHostGroups);
     Collection<Map<String, String>> configOverrides = (Collection<Map<String, String>>)properties.get("configurations");
     processConfigurations(processBlueprintConfigurations(blueprint, configOverrides),
@@ -410,7 +402,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    * @throws IllegalArgumentException if required password properties are missing and no
    *                                  default is specified via 'default_password'
    */
-  private void validatePasswordProperties(BlueprintEntity blueprint, Map<String, HostGroup> hostGroups,
+  private void validatePasswordProperties(BlueprintEntity blueprint, Map<String, HostGroupImpl> hostGroups,
                                           String defaultPassword) {
 
     Map<String, Map<String, Collection<String>>> missingPasswords = blueprint.validateConfigurations(
@@ -432,8 +424,8 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
           if (isPropertyInConfiguration(mapClusterConfigurations.get(configType), property)){
               propIter.remove();
           } else {
-            HostGroup hg = hostGroups.get(entry.getKey());
-            if (hg != null && isPropertyInConfiguration(hg.getConfigurations().get(configType), property)) {
+            HostGroupImpl hg = hostGroups.get(entry.getKey());
+            if (hg != null && isPropertyInConfiguration(hg.getConfigurationProperties().get(configType), property)) {
               propIter.remove();
             }  else if (setDefaultPassword(defaultPassword, configType, property)) {
               propIter.remove();
@@ -510,7 +502,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempted to create a service or component that already exists
    * @throws NoSuchParentResourceException  a required parent resource is missing
    */
-  private void createServiceAndComponentResources(Map<String, HostGroup> blueprintHostGroups,
+  private void createServiceAndComponentResources(Map<String, HostGroupImpl> blueprintHostGroups,
                                                   String clusterName, Set<String> services)
                                                   throws SystemException,
                                                          UnsupportedPropertyException,
@@ -555,12 +547,12 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempt to create a host or host_component which already exists
    * @throws NoSuchParentResourceException  a required parent resource is missing
    */
-  private void createHostAndComponentResources(Map<String, HostGroup> blueprintHostGroups, String clusterName)
+  private void createHostAndComponentResources(Map<String, HostGroupImpl> blueprintHostGroups, String clusterName)
       throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
 
     ResourceProvider hostProvider = getResourceProvider(Resource.Type.Host);
     ResourceProvider hostComponentProvider = getResourceProvider(Resource.Type.HostComponent);
-    for (HostGroup group : blueprintHostGroups.values()) {
+    for (HostGroupImpl group : blueprintHostGroups.values()) {
       for (String host : group.getHostInfo()) {
         Map<String, Object> hostProperties = new HashMap<String, Object>();
         hostProperties.put("Hosts/cluster_name", clusterName);
@@ -599,7 +591,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempt to create a component which already exists
    * @throws NoSuchParentResourceException  a required parent resource is missing
    */
-  private void createComponentResources(Map<String, HostGroup> blueprintHostGroups,
+  private void createComponentResources(Map<String, HostGroupImpl> blueprintHostGroups,
                                         String clusterName, Set<String> services)
                                         throws SystemException,
                                                UnsupportedPropertyException,
@@ -607,7 +599,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
                                                NoSuchParentResourceException {
     for (String service : services) {
       Set<String> components = new HashSet<String>();
-      for (HostGroup hostGroup : blueprintHostGroups.values()) {
+      for (HostGroupImpl hostGroup : blueprintHostGroups.values()) {
         Collection<String> serviceComponents = hostGroup.getComponents(service);
         if (serviceComponents != null && !serviceComponents.isEmpty()) {
           components.addAll(serviceComponents);
@@ -694,7 +686,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    */
   @SuppressWarnings("unchecked")
   private void applyRequestInfoToHostGroups(Map<String, Object> properties,
-                                            Map<String, HostGroup> blueprintHostGroups)
+                                            Map<String, HostGroupImpl> blueprintHostGroups)
                                             throws IllegalArgumentException {
 
     @SuppressWarnings("unchecked")
@@ -711,7 +703,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
       if (name == null || name.isEmpty()) {
         throw new IllegalArgumentException("Every host_group must include a non-null 'name' property");
       }
-      HostGroup hostGroup = blueprintHostGroups.get(name);
+      HostGroupImpl hostGroup = blueprintHostGroups.get(name);
 
       if (hostGroup == null) {
         throw new IllegalArgumentException("Invalid host_group specified: " + name +
@@ -731,7 +723,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
         }
         hostGroup.addHostInfo(fqdn);
       }
-      Map<String, Map<String, String>> existingConfigurations = hostGroup.getConfigurations();
+      Map<String, Map<String, String>> existingConfigurations = hostGroup.getConfigurationProperties();
       overrideExistingProperties(existingConfigurations, (Collection<Map<String, String>>)
           hostGroupProperties.get("configurations"));
 
@@ -786,7 +778,8 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    */
   private void processConfigurations(Map<String, Map<String, String>> blueprintConfigurations,
                                      Map<String, Map<String, Map<String, String>>> blueprintAttributes,
-                                     Stack stack, Map<String, HostGroup> blueprintHostGroups)  {
+                                     Stack stack, Map<String, HostGroupImpl> blueprintHostGroups)  {
+
 
     for (String service : getServicesToDeploy(stack, blueprintHostGroups)) {
       for (String type : stack.getConfigurationTypes(service)) {
@@ -817,16 +810,8 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
     processBlueprintClusterConfigurations(blueprintConfigurations);
     processBlueprintClusterConfigAttributes(blueprintAttributes);
 
-    for (Map.Entry<String, Map<String, String>> entry : mapClusterConfigurations.entrySet()) {
-      for (Map.Entry<String, String> propertyEntry : entry.getValue().entrySet()) {
-        String propName = propertyEntry.getKey();
-        // see if property needs to be updated
-        PropertyUpdater propertyUpdater = propertyUpdaters.get(propName);
-        if (propertyUpdater != null) {
-          propertyEntry.setValue(propertyUpdater.update(blueprintHostGroups, propertyEntry.getValue()));
-        }
-      }
-    }
+    BlueprintConfigurationProcessor configurationProcessor = new BlueprintConfigurationProcessor(mapClusterConfigurations);
+    configurationProcessor.doUpdateForClusterCreate(blueprintHostGroups);
     setMissingConfigurations();
   }
   
@@ -942,9 +927,9 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    *
    * @return set of service names which will be deployed
    */
-  private Set<String> getServicesToDeploy(Stack stack, Map<String, HostGroup> blueprintHostGroups) {
+  private Set<String> getServicesToDeploy(Stack stack, Map<String, HostGroupImpl> blueprintHostGroups) {
     Set<String> services = new HashSet<String>();
-    for (HostGroup group : blueprintHostGroups.values()) {
+    for (HostGroupImpl group : blueprintHostGroups.values()) {
       if (! group.getHostInfo().isEmpty()) {
         services.addAll(stack.getServicesForComponents(group.getComponents()));
       }
@@ -955,75 +940,6 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
     return services;
   }
 
-  /**
-   * Register updaters for configuration properties.
-   */
-  private void registerPropertyUpdaters() {
-    // NAMENODE
-    propertyUpdaters.put("dfs.http.address", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.http-address", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.https.address", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.https-address", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("fs.default.name", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("fs.defaultFS", new SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("hbase.rootdir", new SingleHostPropertyUpdater("NAMENODE"));
-
-    // SECONDARY_NAMENODE
-    propertyUpdaters.put("dfs.secondary.http.address", new SingleHostPropertyUpdater("SECONDARY_NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.secondary.http-address", new SingleHostPropertyUpdater("SECONDARY_NAMENODE"));
-
-    // HISTORY_SERVER
-    propertyUpdaters.put("yarn.log.server.url", new SingleHostPropertyUpdater("HISTORYSERVER"));
-    propertyUpdaters.put("mapreduce.jobhistory.webapp.address", new SingleHostPropertyUpdater("HISTORYSERVER"));
-    propertyUpdaters.put("mapreduce.jobhistory.address", new SingleHostPropertyUpdater("HISTORYSERVER"));
-
-    // RESOURCEMANAGER
-    propertyUpdaters.put("yarn.resourcemanager.hostname", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.resource-tracker.address", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.webapp.address", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.scheduler.address", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.address", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.admin.address", new SingleHostPropertyUpdater("RESOURCEMANAGER"));
-
-    // JOBTRACKER
-    propertyUpdaters.put("mapred.job.tracker", new SingleHostPropertyUpdater("JOBTRACKER"));
-    propertyUpdaters.put("mapred.job.tracker.http.address", new SingleHostPropertyUpdater("JOBTRACKER"));
-    propertyUpdaters.put("mapreduce.history.server.http.address", new SingleHostPropertyUpdater("JOBTRACKER"));
-
-    // HIVE_SERVER
-    propertyUpdaters.put("hive.metastore.uris", new SingleHostPropertyUpdater("HIVE_SERVER"));
-    propertyUpdaters.put("hive_ambari_host", new SingleHostPropertyUpdater("HIVE_SERVER"));
-    propertyUpdaters.put("javax.jdo.option.ConnectionURL",
-        new DBPropertyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
-
-    // OOZIE_SERVER
-    propertyUpdaters.put("oozie.base.url", new SingleHostPropertyUpdater("OOZIE_SERVER"));
-    propertyUpdaters.put("oozie_ambari_host", new SingleHostPropertyUpdater("OOZIE_SERVER"));
-
-    // ZOOKEEPER_SERVER
-    propertyUpdaters.put("hbase.zookeeper.quorum", new MultipleHostPropertyUpdater("ZOOKEEPER_SERVER"));
-    propertyUpdaters.put("templeton.zookeeper.hosts", new MultipleHostPropertyUpdater("ZOOKEEPER_SERVER"));
-
-    // STORM
-    propertyUpdaters.put("nimbus.host", new SingleHostPropertyUpdater("NIMBUS"));
-    propertyUpdaters.put("worker.childopts", new SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("supervisor.childopts", new SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("nimbus.childopts", new SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("storm.zookeeper.servers",
-      new YamlMultiValuePropertyDecorator(new MultipleHostPropertyUpdater("ZOOKEEPER_SERVER")));
-
-    // properties which need "m' appended.  Required due to AMBARI-4933
-    propertyUpdaters.put("namenode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("namenode_opt_newsize", new MPropertyUpdater());
-    propertyUpdaters.put("namenode_opt_maxnewsize", new MPropertyUpdater());
-    propertyUpdaters.put("dtnode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_opt_newsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("hbase_master_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("hbase_regionserver_heapsize", new MPropertyUpdater());
-  }
-
   /**
    * Register config groups for host group scoped configuration.
    * For each host group with configuration specified in the blueprint, a config group is created
@@ -1038,16 +954,16 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    * @throws UnsupportedPropertyException   an invalid property is provided when creating a config group
    * @throws NoSuchParentResourceException  attempt to create a config group for a non-existing cluster
    */
-  private void registerConfigGroups(String clusterName, Map<String, HostGroup> hostGroups, Stack stack) throws
+  private void registerConfigGroups(String clusterName, Map<String, HostGroupImpl> hostGroups, Stack stack) throws
       ResourceAlreadyExistsException, SystemException,
       UnsupportedPropertyException, NoSuchParentResourceException {
     
-    for (HostGroup group : hostGroups.values()) {
+    for (HostGroupImpl group : hostGroups.values()) {
       HostGroupEntity entity = group.getEntity();
       Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
       
-      handleGlobalsBackwardsCompability(stack, group.getConfigurations());
-      for (Map.Entry<String, Map<String, String>> entry: group.getConfigurations().entrySet()) {
+      handleGlobalsBackwardsCompability(stack, group.getConfigurationProperties());
+      for (Map.Entry<String, Map<String, String>> entry: group.getConfigurationProperties().entrySet()) {
         String type = entry.getKey();
         String service = stack.getServiceForConfigType(type);
         Config config = new ConfigImpl(type);
@@ -1079,11 +995,11 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
    *
    * @param hostGroups map of host group name to host group
    */
-  private void validateHostMappings(Map<String, HostGroup> hostGroups) {
+  private void validateHostMappings(Map<String, HostGroupImpl> hostGroups) {
     Collection<String> mappedHosts = new HashSet<String>();
     Collection<String> flaggedHosts = new HashSet<String>();
 
-    for (HostGroup hostgroup : hostGroups.values()) {
+    for (HostGroupImpl hostgroup : hostGroups.values()) {
       for (String host : hostgroup.getHostInfo()) {
         if (mappedHosts.contains(host)) {
           flaggedHosts.add(host);
@@ -1099,279 +1015,5 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
                                          flaggedHosts);
     }
   }
-
-
-  /**
-   * Provides functionality to update a property value.
-   */
-  public interface PropertyUpdater {
-    /**
-     * Update a property value.
-     *
-     * @param hostGroups  host groups
-     * @param origValue   original value of property
-     *
-     * @return new property value
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue);
-  }
-
-  /**
-   * Topology based updater which replaces the original host name of a property with the host name
-   * which runs the associated (master) component in the new cluster.
-   */
-  private class SingleHostPropertyUpdater implements PropertyUpdater {
-    /**
-     * Component name
-     */
-    private String component;
-
-    /**
-     * Constructor.
-     *
-     * @param component  component name associated with the property
-     */
-    public SingleHostPropertyUpdater(String component) {
-      this.component = component;
-    }
-
-    /**
-     * Update the property with the new host name which runs the associated component.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return updated property value with old host name replaced by new host name
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue)  {
-      Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values());
-      if (matchingGroups.size() == 1) {
-        return origValue.replace("localhost", matchingGroups.iterator().next().getHostInfo().iterator().next());
-      } else {
-        throw new IllegalArgumentException("Unable to update configuration property with topology information. " +
-            "Component '" + this.component + "' is not mapped to any host group or is mapped to multiple groups.");
-      }
-    }
-  }
-
-  /**
-   * Topology based updater which replaces the original host name of a database property with the host name
-   * where the DB is deployed in the new cluster.  If an existing database is specified, the original property
-   * value is returned.
-   */
-  private class DBPropertyUpdater extends SingleHostPropertyUpdater {
-    /**
-     * Property type (global, core-site ...) for property which is used to determine if DB is external.
-     */
-    private final String configPropertyType;
-
-    /**
-     * Name of property which is used to determine if DB is new or existing (exernal).
-     */
-    private final String conditionalPropertyName;
-
-    /**
-     * Constructor.
-     *
-     * @param component                component to get hot name if new DB
-     * @param configPropertyType       config type of property used to determine if DB is external
-     * @param conditionalPropertyName  name of property which is used to determine if DB is external
-     */
-    private DBPropertyUpdater(String component, String configPropertyType, String conditionalPropertyName) {
-      super(component);
-      this.configPropertyType = configPropertyType;
-      this.conditionalPropertyName = conditionalPropertyName;
-    }
-
-    /**
-     * If database is a new managed database, update the property with the new host name which
-     * runs the associated component.  If the database is external (non-managed), return the
-     * original value.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return updated property value with old host name replaced by new host name or original value
-     *         if the database is exernal
-     */
-    @Override
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      if (isDatabaseManaged()) {
-        return super.update(hostGroups, origValue);
-      } else {
-        return origValue;
-      }
-    }
-
-    /**
-     * Determine if database is managed, meaning that it is a component in the cluster topology.
-     *
-     * @return true if the DB is managed; false otherwise
-     */
-    //todo: use super.isDependencyManaged() and remove this method
-    private boolean isDatabaseManaged() {
-      // conditional property should always exist since it is required to be specified in the stack
-      return mapClusterConfigurations.get(configPropertyType).
-          get(conditionalPropertyName).startsWith("New");
-    }
-  }
-
-  /**
-   * Topology based updater which replaces original host names (possibly more than one) contained in a property
-   * value with the host names which runs the associated component in the new cluster.
-   */
-  private class MultipleHostPropertyUpdater implements PropertyUpdater {
-    /**
-     * Component name
-     */
-    private String component;
-
-    /**
-     * Separator for multiple property values
-     */
-    private Character separator = ',';
-
-    /**
-     * Constructor.
-     *
-     * @param component  component name associated with the property
-     */
-    public MultipleHostPropertyUpdater(String component) {
-      this.component = component;
-    }
-
-    /**
-     * Constructor with customized separator.
-     * @param component Component name
-     * @param separator separator character
-     */
-    public MultipleHostPropertyUpdater(String component, Character separator) {
-      this.component = component;
-      this.separator = separator;
-    }
-
-    //todo: specific to default values of EXACTLY 'localhost' or 'localhost:port'.
-    //todo: when blueprint contains source configurations, these props will contain actual host names, not localhost.
-    //todo: currently assuming that all hosts will share the same port
-    /**
-     * Update all host names included in the original property value with new host names which run the associated
-     * component.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original value
-     *
-     * @return updated property value with old host names replaced by new host names
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values());
-      boolean containsPort = origValue.contains(":");
-      String port = null;
-      if (containsPort) {
-        port = origValue.substring(origValue.indexOf(":") + 1);
-      }
-      StringBuilder sb = new StringBuilder();
-      boolean firstHost = true;
-      for (HostGroup group : matchingGroups) {
-        for (String host : group.getHostInfo()) {
-          if (!firstHost) {
-            sb.append(separator);
-          } else {
-            firstHost = false;
-          }
-          sb.append(host);
-          if (containsPort) {
-            sb.append(":");
-            sb.append(port);
-          }
-        }
-      }
-
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Updater which appends "m" to the original property value.
-   * For example, "1024" would be updated to "1024m".
-   */
-  private class MPropertyUpdater implements PropertyUpdater {
-    /**
-     * Append 'm' to the original property value if it doesn't already exist.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return property with 'm' appended
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      return origValue.endsWith("m") ? origValue : origValue + 'm';
-    }
-  }
-
-  /**
-   * Class to facilitate special formatting needs of property values.
-   */
-  private abstract class AbstractPropertyValueDecorator implements PropertyUpdater {
-    PropertyUpdater propertyUpdater;
-
-    public AbstractPropertyValueDecorator(PropertyUpdater propertyUpdater) {
-      this.propertyUpdater = propertyUpdater;
-    }
-
-    /**
-     * Return decorated form of the updated input property value.
-     * @param hostGroupMap Map of host group name to HostGroup
-     * @param origValue   original value of property
-     *
-     * @return Formatted output string
-     */
-    @Override
-    public String update(Map<String, HostGroup> hostGroupMap, String origValue) {
-      return doFormat(propertyUpdater.update(hostGroupMap, origValue));
-    }
-
-    /**
-     * Transform input string to required output format.
-     * @param originalValue Original value of property
-     * @return Formatted output string
-     */
-    public abstract String doFormat(String originalValue);
-  }
-
-  /**
-   * Return properties of the form ['value']
-   */
-  private class YamlMultiValuePropertyDecorator extends AbstractPropertyValueDecorator {
-
-    public YamlMultiValuePropertyDecorator(PropertyUpdater propertyUpdater) {
-      super(propertyUpdater);
-    }
-
-    /**
-     * Format input String of the form, str1,str2 to ['str1','str2']
-     * @param origValue Input string
-     * @return Formatted string
-     */
-    @Override
-    public String doFormat(String origValue) {
-      StringBuilder sb = new StringBuilder();
-      if (origValue != null) {
-        sb.append("[");
-        boolean isFirst = true;
-        for (String value : origValue.split(",")) {
-          if (!isFirst) {
-            sb.append(",");
-          } else {
-            isFirst = false;
-          }
-          sb.append("'");
-          sb.append(value);
-          sb.append("'");
-        }
-        sb.append("]");
-      }
-      return sb.toString();
-    }
-  }
 }
 

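The updater registration removed above is exactly what moved into the shared processor. The same registered updaters also drive blueprint export (invoked from ClusterBlueprintRenderer), where concrete host names are replaced by %HOSTGROUP::name% tokens and values that point at hosts outside the cluster are dropped. A minimal sketch of that direction, under the same assumptions as the previous sketch (the class and the sample host name are illustrative):

    package org.apache.ambari.server.controller.internal;

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;

    public class BlueprintExportUpdateSketch {

      static Map<String, Map<String, String>> updateForExport(Collection<HostGroup> hostGroups) {
        Map<String, Map<String, String>> config = new HashMap<String, Map<String, String>>();
        Map<String, String> coreSite = new HashMap<String, String>();
        coreSite.put("fs.defaultFS", "c6401.ambari.apache.org:8020");   // concrete cluster value
        config.put("core-site", coreSite);

        // If a host group containing NAMENODE maps that host, fs.defaultFS becomes
        // "%HOSTGROUP::<group name>%:8020"; a value naming an external host is removed instead.
        BlueprintConfigurationProcessor processor = new BlueprintConfigurationProcessor(config);
        return processor.doUpdateForBlueprintExport(hostGroups);
      }
    }
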
+ 10 - 3
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java

@@ -145,7 +145,7 @@ public class ConfigurationResourceProvider extends
     final Set<ConfigurationRequest> requests = new HashSet<ConfigurationRequest>();
 
     for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
+      requests.add(getRequest(request, propertyMap));
     }
 
     Set<ConfigurationResponse> responses = getResources(new Command<Set<ConfigurationResponse>>() {
@@ -245,12 +245,19 @@ public class ConfigurationResourceProvider extends
    *
    * @return a configuration request
    */
-  private ConfigurationRequest getRequest(Map<String, Object> properties) {
+  private ConfigurationRequest getRequest(Request request, Map<String, Object> properties) {
     String type = (String) properties.get(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID);
     String tag  = (String) properties.get(CONFIGURATION_CONFIG_TAG_PROPERTY_ID);
 
-    return new ConfigurationRequest(
+    ConfigurationRequest configRequest = new ConfigurationRequest(
         (String) properties.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID),
         type, tag, new HashMap<String, String>(), new HashMap<String, Map<String, String>>());
+
+    Set<String> requestedIds = request.getPropertyIds();
+    if (requestedIds.contains("properties") || requestedIds.contains("*")) {
+      configRequest.setIncludeProperties(true);
+    }
+
+    return configRequest;
   }
 }

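In short, property values are now included in a configuration response only when the caller opts in by requesting the "properties" field (or everything via "*"), which is what the blueprint export path does. A small helper-style sketch of that decision, mirroring the check added to getRequest() above (the helper itself is illustrative):

    // Decide whether a ConfigurationRequest should carry the full property map;
    // "properties" and "*" are the opt-in property ids.
    static boolean shouldIncludeProperties(java.util.Set<String> requestedPropertyIds) {
      return requestedPropertyIds.contains("properties") || requestedPropertyIds.contains("*");
    }
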
+ 56 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostGroup.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Host Group definition.
+ */
+public interface HostGroup {
+
+  /**
+   * Get the host group name.
+   *
+   * @return host group name
+   */
+  public String getName();
+
+  /**
+   * Get associated host information.
+   *
+   * @return collection of hosts associated with the host group
+   */
+  public Collection<String> getHostInfo();
+
+  /**
+   * Get the components associated with the host group.
+   *
+   * @return  collection of component names for the host group
+   */
+  public Collection<String> getComponents();
+
+  /**
+   * Get the configurations associated with the host group.
+   *
+   * @return map of configuration type to a map of properties
+   */
+  public Map<String, Map<String, String>> getConfigurationProperties();
+}

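For reference, a minimal sketch of what an implementation of this interface looks like; the class below is illustrative only (the commit's production implementation is HostGroupImpl, and the tests further down use their own TestHostGroup):

    package org.apache.ambari.server.controller.internal;

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;

    public class SimpleHostGroup implements HostGroup {

      private final String name;
      private final Collection<String> hosts;
      private final Collection<String> components;
      private final Map<String, Map<String, String>> configurationProperties =
          new HashMap<String, Map<String, String>>();

      public SimpleHostGroup(String name, Collection<String> hosts, Collection<String> components) {
        this.name = name;
        this.hosts = hosts;
        this.components = components;
      }

      @Override
      public String getName() {
        return name;
      }

      @Override
      public Collection<String> getHostInfo() {
        return hosts;
      }

      @Override
      public Collection<String> getComponents() {
        return components;
      }

      @Override
      public Map<String, Map<String, String>> getConfigurationProperties() {
        return configurationProperties;
      }
    }
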
+ 31 - 1
ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java

@@ -22,22 +22,30 @@ import org.apache.ambari.server.api.query.QueryInfo;
 import org.apache.ambari.server.api.resources.ClusterResourceDefinition;
 import org.apache.ambari.server.api.resources.HostComponentResourceDefinition;
 import org.apache.ambari.server.api.resources.HostResourceDefinition;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.api.util.TreeNodeImpl;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -91,12 +99,20 @@ public class ClusterBlueprintRendererTest {
     assertTrue(propertyTree.getChild("Host/HostComponent").getObject().contains("HostRoles/component_name"));
   }
 
+  @Ignore
   @Test
   public void testFinalizeResult() throws Exception{
+
+    AmbariManagementController controller = createMock(AmbariManagementController.class);
+    AmbariMetaInfo stackInfo = createNiceMock(AmbariMetaInfo.class);
+
+    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", "HDFS")).andReturn(Collections.<String, PropertyInfo>emptyMap());
+    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", "MAPREDUCE")).andReturn(Collections.<String, PropertyInfo>emptyMap());
+
     Result result = new ResultImpl(true);
     createClusterResultTree(result.getResultTree());
 
-    ClusterBlueprintRenderer renderer = new ClusterBlueprintRenderer();
+    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(controller);
     Result blueprintResult = renderer.finalizeResult(result);
 
     TreeNode<Resource> blueprintTree = blueprintResult.getResultTree();
@@ -222,4 +238,18 @@ public class ClusterBlueprintRendererTest {
   private String getLocalHostName() throws UnknownHostException {
     return InetAddress.getLocalHost().getHostName();
   }
+
+  private static class TestBlueprintRenderer extends ClusterBlueprintRenderer {
+
+    private AmbariManagementController testController;
+
+    private TestBlueprintRenderer(AmbariManagementController controller) {
+      testController = controller;
+    }
+
+    @Override
+    protected AmbariManagementController getController() {
+      return testController;
+    }
+  }
 }

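The TestBlueprintRenderer above shows the seam that keeps the renderer testable: the management controller is obtained through a protected getController() hook that a test can override with a mock. A rough sketch of how the currently @Ignore'd test could be wired up with EasyMock once the mocks are completed; replay() is a static import from EasyMock, and the commented expectation tying the controller to the stack metadata is an assumption, not something shown in this diff:

    AmbariManagementController controller = createMock(AmbariManagementController.class);
    AmbariMetaInfo stackInfo = createNiceMock(AmbariMetaInfo.class);

    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", "HDFS"))
        .andReturn(Collections.<String, PropertyInfo>emptyMap());
    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", "MAPREDUCE"))
        .andReturn(Collections.<String, PropertyInfo>emptyMap());
    // assumed wiring: the renderer reaches AmbariMetaInfo through the controller
    // expect(controller.getAmbariMetaInfo()).andReturn(stackInfo).anyTimes();
    replay(controller, stackInfo);

    Result result = new ResultImpl(true);
    createClusterResultTree(result.getResultTree());   // test helper from this class

    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(controller);
    Result blueprintResult = renderer.finalizeResult(result);
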
+ 932 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java

@@ -0,0 +1,932 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.junit.Test;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertTrue;
+
+
+/**
+ * BlueprintConfigurationProcessor unit tests.
+ */
+public class BlueprintConfigurationProcessorTest {
+
+  @Test
+  public void testDoUpdateForBlueprintExport_SingleHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "testhost");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("%HOSTGROUP::group1%", updatedVal);
+  }
+  
+  @Test
+  public void testDoUpdateForBlueprintExport_SingleHostProperty__withPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "testhost:8020");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("%HOSTGROUP::group1%:8020", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_SingleHostProperty__ExternalReference() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "external-host");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    assertFalse(updatedProperties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase.zookeeper.quorum", "testhost,testhost2,testhost2a,testhost2b");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    assertEquals("%HOSTGROUP::group1%,%HOSTGROUP::group2%", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__WithPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("templeton.zookeeper.hosts", "testhost:5050,testhost2:9090,testhost2a:9090,testhost2b:9090");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    assertEquals("%HOSTGROUP::group1%:5050,%HOSTGROUP::group2%:9090", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("storm.zookeeper.servers", "['testhost:5050','testhost2:9090','testhost2a:9090','testhost2b:9090']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertEquals("['%HOSTGROUP::group1%:5050','%HOSTGROUP::group2%:9090']", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_DBHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true");
+    properties.put("hive-site", hiveSiteProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_DBHostProperty__External() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://external-host/hive?createDatabaseIfNotExist=true");
+    properties.put("hive-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
+    assertFalse(updatedProperties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "localhost");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("testhost", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue__WithPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "localhost:5050");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("testhost:5050", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase.zookeeper.quorum", "localhost");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost");
+    expectedHosts.add("testhost2");
+    expectedHosts.add("testhost2a");
+    expectedHosts.add("testhost2b");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___withPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
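+    // every ZOOKEEPER_SERVER host should appear in the updated list with the :9090 port from the default value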
+    typeProps.put("templeton.zookeeper.hosts", "localhost:9090");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost:9090");
+    expectedHosts.add("testhost2:9090");
+    expectedHosts.add("testhost2a:9090");
+    expectedHosts.add("testhost2b:9090");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
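+    // the YAML list form should be preserved, with one quoted entry per ZOOKEEPER_SERVER host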
+    typeProps.put("storm.zookeeper.servers", "['localhost']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertTrue(updatedVal.startsWith("["));
+    assertTrue(updatedVal.endsWith("]"));
+    // remove the surrounding brackets
+    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
+
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("'testhost'");
+    expectedHosts.add("'testhost2'");
+    expectedHosts.add("'testhost2a'");
+    expectedHosts.add("'testhost2b'");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MProperty__defaultValues() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
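+    // the value already carries the 'm' suffix and should pass through unchanged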
+    typeProps.put("hbase_master_heapsize", "512m");
+    properties.put("hbase-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hbase-env").get("hbase_master_heapsize");
+    assertEquals("512m", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MProperty__missingM() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
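+    // the missing 'm' suffix should be appended to the heapsize value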
+    typeProps.put("hbase_master_heapsize", "512");
+    properties.put("hbase-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hbase-env").get("hbase_master_heapsize");
+    assertEquals("512m", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
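+    // %HOSTGROUP::group1% is the token written during blueprint export; it should resolve to the single host of group1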
+    typeProps.put("yarn.resourcemanager.hostname", "%HOSTGROUP::group1%");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("testhost", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue__WithPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "%HOSTGROUP::group1%:5050");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("testhost:5050", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
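+    // each %HOSTGROUP% token should expand to all hosts of the referenced host group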
+    typeProps.put("hbase.zookeeper.quorum", "%HOSTGROUP::group1%,%HOSTGROUP::group2%");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost");
+    expectedHosts.add("testhost2");
+    expectedHosts.add("testhost2a");
+    expectedHosts.add("testhost2b");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___withPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
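+    // the port appended to each %HOSTGROUP% token should be applied to every host of that group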
+    typeProps.put("templeton.zookeeper.hosts", "%HOSTGROUP::group1%:9090,%HOSTGROUP::group2%:9091");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost:9090");
+    expectedHosts.add("testhost2:9091");
+    expectedHosts.add("testhost2a:9091");
+    expectedHosts.add("testhost2b:9091");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
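+    // tokens inside the YAML list should be resolved while quoting and per-group ports are preserved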
+    typeProps.put("storm.zookeeper.servers", "['%HOSTGROUP::group1%:9090','%HOSTGROUP::group2%:9091']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents3.add("HDFS_CLIENT");
+    hgComponents3.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertTrue(updatedVal.startsWith("["));
+    assertTrue(updatedVal.endsWith("]"));
+    // remove the surrounding brackets
+    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
+
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("'testhost:9090'");
+    expectedHosts.add("'testhost2:9091'");
+    expectedHosts.add("'testhost2a:9091'");
+    expectedHosts.add("'testhost2b:9091'");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__defaultValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
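+    // hive_database is "New MySQL Database" (managed), so "localhost" should resolve to the MYSQL_SERVER host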
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true");
+    Map<String, String> hiveEnvProps = new HashMap<String, String>();
+    hiveEnvProps.put("hive_database", "New MySQL Database");
+    properties.put("hive-site", hiveSiteProps);
+    properties.put("hive-env", hiveEnvProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__exportedValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true");
+    Map<String, String> hiveEnvProps = new HashMap<String, String>();
+    hiveEnvProps.put("hive_database", "New MySQL Database");
+    properties.put("hive-site", hiveSiteProps);
+    properties.put("hive-env", hiveEnvProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__external() {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
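+    // "Existing MySQL Database" indicates an external database, so the explicit hostname must be left unchanged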
+    typeProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://myHost.com/hive?createDatabaseIfNotExist=true");
+    typeProps.put("hive_database", "Existing MySQL Database");
+    properties.put("hive-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("hive-env").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://myHost.com/hive?createDatabaseIfNotExist=true", updatedVal);
+  }
+
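+  /**
+   * Simple HostGroup stub backing these tests; only name, hosts and components are needed.
+   */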
+  private class TestHostGroup implements HostGroup {
+
+    private String name;
+    private Collection<String> hosts;
+    private Collection<String> components;
+
+    private TestHostGroup(String name, Collection<String> hosts, Collection<String> components) {
+      this.name = name;
+      this.hosts = hosts;
+      this.components = components;
+    }
+
+    @Override
+    public String getName() {
+      return name;
+    }
+
+    @Override
+    public Collection<String> getHostInfo() {
+      return hosts;
+    }
+
+    @Override
+    public Collection<String> getComponents() {
+      return components;
+    }
+
+    @Override
+    public Map<String, Map<String, String>> getConfigurationProperties() {
+      return null;
+    }
+  }
+
+}

+ 0 - 103
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java

@@ -21,7 +21,6 @@ package org.apache.ambari.server.controller.internal;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.eq;
@@ -34,9 +33,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.lang.reflect.Field;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -61,7 +58,6 @@ import org.apache.ambari.server.controller.StackServiceComponentRequest;
 import org.apache.ambari.server.controller.StackServiceComponentResponse;
 import org.apache.ambari.server.controller.StackServiceRequest;
 import org.apache.ambari.server.controller.StackServiceResponse;
-import org.apache.ambari.server.controller.internal.ClusterResourceProvider.PropertyUpdater;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.RequestStatus;
@@ -69,7 +65,6 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.internal.BaseBlueprintProcessor.HostGroup;
 import org.apache.ambari.server.orm.dao.BlueprintDAO;
 import org.apache.ambari.server.orm.entities.BlueprintConfigEntity;
 import org.apache.ambari.server.orm.entities.BlueprintEntity;
@@ -80,7 +75,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.State;
-import org.apache.commons.collections.CollectionUtils;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.Assert;
@@ -2535,103 +2529,6 @@ public class ClusterResourceProviderTest {
         hostComponentResourceProvider, configGroupResourceProvider, persistKeyValue, metaInfo);
   }
 
-  @SuppressWarnings("unchecked")
-  @Test
-  public void testBlueprintPropertyUpdaters() throws Exception {
-    final Map<String, String> singleHostProperty1 =
-      Collections.singletonMap("dfs.http.address", "localhost:50070");
-
-    final Map<String, String> singleHostProperty2 =
-      Collections.singletonMap("hive.metastore.uris", "prefix.localhost.suffix");
-
-    final Map<String, String> multiHostProperty1 =
-      Collections.singletonMap("hbase.zookeeper.quorum", "localhost");
-
-    final Map<String, String> multiHostProperty2 =
-      Collections.singletonMap("storm.zookeeper.servers", "['localhost']");
-
-    final Map<String, String> mProperty =
-      Collections.singletonMap("namenode_heapsize", "1025");
-
-    final Map<String, String> databaseProperty =
-        Collections.singletonMap("javax.jdo.option.ConnectionURL", "localhost:12345");
-
-    final HostGroup hostGroup1 = createNiceMock(HostGroup.class);
-    final HostGroup hostGroup2 = createNiceMock(HostGroup.class);
-
-    expect(hostGroup1.getComponents()).andReturn(new ArrayList<String>() {{
-      add("NAMENODE");
-      add("HBASE_MASTER");
-      add("HIVE_SERVER");
-      add("ZOOKEEPER_SERVER");
-    }}).anyTimes();
-    expect(hostGroup1.getHostInfo()).andReturn(Collections.singletonList("h1")).anyTimes();
-
-    expect(hostGroup2.getComponents()).andReturn(Collections.singletonList("ZOOKEEPER_SERVER")).anyTimes();
-    expect(hostGroup2.getHostInfo()).andReturn(Collections.singletonList("h2")).anyTimes();
-
-    Map<String, HostGroup> hostGroups = new
-      HashMap<String, HostGroup>() {{
-        put("host_group_1", hostGroup1);
-        put("host_group_2", hostGroup2);
-      }};
-
-    AmbariManagementController managementController = createNiceMock(AmbariManagementController.class);
-
-    ClusterResourceProvider resourceProvider =
-      createMockBuilder(ClusterResourceProvider.class)
-        .withConstructor(Set.class, Map.class, AmbariManagementController.class)
-        .withArgs(new HashSet<String>(), new HashMap<Resource.Type, String>(), managementController)
-        .createMock();
-
-    replay(managementController, resourceProvider, hostGroup1, hostGroup2);
-
-    Map<String, Map<String, String>> mapConfigurations;
-    Field configField = ClusterResourceProvider.class.getDeclaredField("mapClusterConfigurations");
-    configField.setAccessible(true);
-    mapConfigurations = (Map<String, Map<String, String>>) configField.get(resourceProvider);
-
-    Map<String, PropertyUpdater> propertyUpdaterMap;
-    Field f = ClusterResourceProvider.class.getDeclaredField("propertyUpdaters");
-    f.setAccessible(true);
-    propertyUpdaterMap = (Map<String, PropertyUpdater>) f.get(resourceProvider);
-
-    Assert.assertNotNull(propertyUpdaterMap);
-
-    String newValue;
-
-    Map.Entry<String, String> entry = singleHostProperty1.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    Assert.assertEquals("h1:50070", newValue);
-
-    entry = singleHostProperty2.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    Assert.assertEquals("prefix.h1.suffix", newValue);
-
-    entry = multiHostProperty1.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    Assert.assertTrue(CollectionUtils.isEqualCollection(
-      Arrays.asList("h1,h2".split(",")), Arrays.asList(newValue.split(","))
-    ));
-
-    entry = multiHostProperty2.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    // no ordering guarantee
-    Assert.assertTrue(newValue.equals("['h1','h2']") || newValue.equals("['h2','h1']"));
-
-    entry = mProperty.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    Assert.assertEquals("1025m", newValue);
-
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put("hive_database", "External MySQL Database");
-    mapConfigurations.put("hive-env", configs);
-    entry = databaseProperty.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, entry.getValue());
-    Assert.assertEquals("localhost:12345", newValue);
-
-    verify(managementController, resourceProvider, hostGroup1, hostGroup2);
-  }
 
   @Test
   public void testGetResources() throws Exception{