
Merge branch 'branch-feature-AMBARI-18456' into trunk

Jonathan Hurley, 8 years ago
parent
commit
704170e4e1
57 changed files with 1198 additions and 1718 deletions
  1. + 3 - 10    ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  2. + 29 - 31   ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
  3. + 10 - 6    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
  4. + 7 - 9     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
  5. + 5 - 4     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
  6. + 8 - 6     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
  7. + 7 - 6     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
  8. + 7 - 6     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
  9. + 1 - 1     ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
  10. + 7 - 6    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
  11. + 2 - 2    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
  12. + 10 - 10  ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
  13. + 1 - 2    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
  14. + 1 - 2    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
  15. + 1 - 1    ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
  16. + 1 - 21   ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
  17. + 17 - 3   ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
  18. + 225 - 255  ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
  19. + 4 - 2    ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
  20. + 6 - 27   ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
  21. + 25 - 9   ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
  22. + 278 - 335  ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
  23. + 14 - 11  ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
  24. + 6 - 4    ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java
  25. + 4 - 13   ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
  26. + 5 - 14   ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
  27. + 1 - 5    ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
  28. + 4 - 9    ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
  29. + 1 - 1    ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
  30. + 8 - 14   ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
  31. + 29 - 80  ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  32. + 5 - 9    ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
  33. + 4 - 9    ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
  34. + 7 - 12   ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
  35. + 19 - 77  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
  36. + 27 - 49  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
  37. + 88 - 99  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
  38. + 68 - 80  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
  39. + 1 - 1    ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
  40. + 7 - 21   ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
  41. + 22 - 50  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
  42. + 50 - 123  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
  43. + 10 - 26  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
  44. + 8 - 22   ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
  45. + 14 - 14  ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
  46. + 12 - 14  ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
  47. + 23 - 26  ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
  48. + 2 - 6    ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
  49. + 7 - 10   ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
  50. + 32 - 101  ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
  51. + 2 - 6    ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
  52. + 2 - 7    ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
  53. + 2 - 4    ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
  54. + 5 - 19   ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
  55. + 33 - 5   ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
  56. + 17 - 23  ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java
  57. + 4 - 0    ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java

+ 3 - 10
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -55,7 +55,6 @@ import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -80,10 +79,10 @@ import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.actionmanager.CommandExecutionType;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
@@ -895,17 +894,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   @Override
   public Config createConfig(Cluster cluster, String type, Map<String, String> properties,
                              String versionTag, Map<String, Map<String, String>> propertiesAttributes) {
-    Config config = configFactory.createNew(cluster, type,
-      properties, propertiesAttributes);
 
-    if (!StringUtils.isEmpty(versionTag)) {
-      config.setTag(versionTag);
-    }
-
-    config.persist();
+    Config config = configFactory.createNew(cluster, type, versionTag, properties,
+        propertiesAttributes);
 
     cluster.addConfig(config);
-
     return config;
   }
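The new factory signature folds what used to be a three-step createNew/setTag/persist sequence into a single call. A minimal sketch of the resulting caller pattern (not Ambari's code), assuming an injected ConfigFactory as in the controller above:

import java.util.Map;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;

public class ConfigCreationSketch {
  private final ConfigFactory configFactory;

  public ConfigCreationSketch(ConfigFactory configFactory) {
    this.configFactory = configFactory;
  }

  public Config create(Cluster cluster, String type, String tag,
      Map<String, String> properties, Map<String, Map<String, String>> attributes) {
    // The factory receives the tag up front and persists on construction, so
    // there is no setTag()/persist() pair left to forget or interleave.
    Config config = configFactory.createNew(cluster, type, tag, properties, attributes);
    cluster.addConfig(config);
    return config;
  }
}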
 

+ 29 - 31
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java

@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.ConfigGroupNotFoundException;
@@ -48,7 +57,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
@@ -56,15 +65,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Inject;
 
 @StaticallyInject
 public class ConfigGroupResourceProvider extends
@@ -101,6 +102,12 @@ public class ConfigGroupResourceProvider extends
   @Inject
   private static HostDAO hostDAO;
 
+  /**
+   * Used for creating {@link Config} instances to return in the REST response.
+   */
+  @Inject
+  private static ConfigFactory configFactory;
+
   /**
   * Create a new resource provider for the given management controller.
    *
@@ -568,22 +575,19 @@ public class ConfigGroupResourceProvider extends
         }
       }
 
+      configLogger.info("User {} is creating new configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       ConfigGroup configGroup = configGroupFactory.createNew(cluster,
         request.getGroupName(),
         request.getTag(), request.getDescription(),
         request.getConfigs(), hosts);
 
-      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
       configGroup.setServiceName(serviceName);
 
-      // Persist before add, since id is auto-generated
-      configLogger.info("Persisting new Config group"
-        + ", clusterName = " + cluster.getClusterName()
-        + ", name = " + configGroup.getName()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
+      verifyConfigs(configGroup.getConfigurations(), cluster.getClusterName());
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
@@ -634,6 +638,11 @@ public class ConfigGroupResourceProvider extends
                                  + ", clusterName = " + request.getClusterName()
                                  + ", groupId = " + request.getId());
       }
+
+      configLogger.info("User {} is updating configuration group {} for tag {} in cluster {}",
+          getManagementController().getAuthName(), request.getGroupName(), request.getTag(),
+          cluster.getClusterName());
+
       String serviceName = configGroup.getServiceName();
       String requestServiceName = cluster.getServiceForConfigTypes(request.getConfigs().keySet());
       if (StringUtils.isEmpty(serviceName) && StringUtils.isEmpty(requestServiceName)) {
@@ -682,13 +691,6 @@ public class ConfigGroupResourceProvider extends
       configGroup.setDescription(request.getDescription());
       configGroup.setTag(request.getTag());
 
-      configLogger.info("Persisting updated Config group"
-        + ", clusterName = " + configGroup.getClusterName()
-        + ", id = " + configGroup.getId()
-        + ", tag = " + configGroup.getTag()
-        + ", user = " + getManagementController().getAuthName());
-
-      configGroup.persist();
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
           request.getServiceConfigVersionNote(), configGroup);
@@ -781,11 +783,7 @@ public class ConfigGroupResourceProvider extends
             }
           }
 
-          Config config = new ConfigImpl(type);
-          config.setTag(tag);
-          config.setProperties(configProperties);
-          config.setPropertiesAttributes(configAttributes);
-
+          Config config = configFactory.createReadOnly(type, tag, configProperties, configAttributes);
           configurations.put(config.getType(), config);
         }
       } catch (Exception e) {
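createReadOnly produces a detached Config carrying only type, tag, properties, and attributes, which is all the REST serialization path needs. A hedged sketch of the call; the type, tag, and property values here are illustrative, not data from an actual request:

import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;

public class ReadOnlyConfigSketch {
  public Config toResponseConfig(ConfigFactory configFactory) {
    // Hypothetical values for illustration only.
    Map<String, String> properties = new HashMap<>();
    properties.put("hbase.rootdir", "/apps/hbase/data");
    Map<String, Map<String, String>> attributes = new HashMap<>();

    // No cluster, no persistence: the instance exists purely to be converted
    // into a response, hence the name createReadOnly.
    return configFactory.createReadOnly("hbase-site", "version1", properties, attributes);
  }
}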

+ 10 - 6
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java

@@ -451,7 +451,7 @@ public class ConfigureAction extends AbstractServerAction {
     // of creating a whole new history record since it was already done
     if (!targetStack.equals(currentStack) && targetStack.equals(configStack)) {
       config.setProperties(newValues);
-      config.persist(false);
+      config.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
     }
@@ -570,8 +570,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(Replace replacement: replacements){
       if(isOperationAllowed(cluster, configType, replacement.key,
-          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState))
+          replacement.ifKey, replacement.ifType, replacement.ifValue, replacement.ifKeyState)) {
         allowedReplacements.add(replacement);
+      }
     }
 
     return allowedReplacements;
@@ -582,8 +583,9 @@ public class ConfigureAction extends AbstractServerAction {
 
     for(ConfigurationKeyValue configurationKeyValue: sets){
       if(isOperationAllowed(cluster, configType, configurationKeyValue.key,
-          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState))
+          configurationKeyValue.ifKey, configurationKeyValue.ifType, configurationKeyValue.ifValue, configurationKeyValue.ifKeyState)) {
         allowedSets.add(configurationKeyValue);
+      }
     }
 
     return allowedSets;
@@ -593,14 +595,16 @@ public class ConfigureAction extends AbstractServerAction {
     List<Transfer> allowedTransfers = new ArrayList<>();
     for (Transfer transfer : transfers) {
       String key = "";
-      if(transfer.operation == TransferOperation.DELETE)
+      if(transfer.operation == TransferOperation.DELETE) {
         key = transfer.deleteKey;
-      else
+      } else {
         key = transfer.fromKey;
+      }
 
       if(isOperationAllowed(cluster, configType, key,
-          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState))
+          transfer.ifKey, transfer.ifType, transfer.ifValue, transfer.ifKeyState)) {
         allowedTransfers.add(transfer);
+      }
     }
 
     return allowedTransfers;

+ 7 - 9
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java

@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,13 +32,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * During stack upgrade, update lzo codec path in mapreduce.application.classpath and
@@ -78,7 +76,7 @@ public class FixLzoCodecPath extends AbstractServerAction {
         }
       }
       config.setProperties(properties);
-      config.persist(false);
+      config.save();
     }
     if (modifiedProperties.isEmpty()) {
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",

+ 5 - 4
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java

@@ -18,7 +18,9 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -28,8 +30,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
 * During stack upgrade, updates the list of Oozie admin users.
@@ -86,7 +87,7 @@ public class FixOozieAdminUsers extends AbstractServerAction {
     oozieProperties.put(OOZIE_ADMIN_USERS_PROP, newOozieAdminUsers);
 
     oozieConfig.setProperties(oozieProperties);
-    oozieConfig.persist(false);
+    oozieConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
             String.format("Set oozie admin users to %s", newOozieAdminUsers), "");

+ 8 - 6
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java

@@ -18,7 +18,10 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.math.BigDecimal;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,9 +30,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.math.BigDecimal;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase properties.  This class is only used when moving from
@@ -79,8 +80,9 @@ public class HBaseConfigCalculation extends AbstractServerAction {
                                    "Upper or lower memstore limit setting value is malformed, skipping", "");
     }
 
-    if (lowerLimit.scale() < 2) //make sure result will have at least 2 digits after decimal point
+    if (lowerLimit.scale() < 2) {
       lowerLimit = lowerLimit.setScale(2, BigDecimal.ROUND_HALF_UP);
+    }
     BigDecimal lowerLimitNew = lowerLimit.divide(upperLimit, BigDecimal.ROUND_HALF_UP);
 
     properties.put(NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString());
@@ -90,7 +92,7 @@ public class HBaseConfigCalculation extends AbstractServerAction {
     properties.remove(OLD_LOWER_LIMIT_PROPERTY_NAME);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("%s was set to %s", NEW_LOWER_LIMIT_PROPERTY_NAME, lowerLimitNew.toString()), "");

+ 7 - 6
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java

@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Computes HBase Env content property.
@@ -79,7 +80,7 @@ public class HBaseEnvMaxDirectMemorySizeAction extends AbstractServerAction {
     properties.put(CONTENT_NAME, appendedContent);
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("The %s/%s property was appended with %s", SOURCE_CONFIG_TYPE, CONTENT_NAME, APPEND_CONTENT_LINE),"");

+ 7 - 6
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java

@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Append hive-env config type with HIVE_HOME and HIVE_CONF_DIR variables if they are absent
@@ -103,7 +104,7 @@ public class HiveEnvClasspathAction extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
       String.format("Added %s, %s to content at %s", HIVE_CONF_DIR, HIVE_HOME, TARGET_CONFIG_TYPE), "");

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java

@@ -85,7 +85,7 @@ public class HiveZKQuorumConfigAction extends AbstractServerAction {
     hiveSiteProperties.put(HIVE_SITE_ZK_CONNECT_STRING, zookeeperQuorum);
 
     hiveSite.setProperties(hiveSiteProperties);
-    hiveSite.persist(false);
+    hiveSite.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("Successfully set %s and %s in %s", HIVE_SITE_ZK_QUORUM,

+ 7 - 6
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java

@@ -18,7 +18,11 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
@@ -27,10 +31,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Inject;
 
 /**
  * Changes oozie-env during upgrade (adds -Dhdp.version to $HADOOP_OPTS variable)
@@ -67,7 +68,7 @@ public class OozieConfigCalculation extends AbstractServerAction {
     }
 
     config.setProperties(properties);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
                   String.format("Added -Dhdp.version to $HADOOP_OPTS variable at %s", TARGET_CONFIG_TYPE), "");

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java

@@ -141,13 +141,13 @@ public class RangerConfigCalculation extends AbstractServerAction {
     targetValues.put("ranger.jpa.audit.jdbc.dialect", dialect);
 
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     config = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);
     targetValues = config.getProperties();
     targetValues.put("ranger_privelege_user_jdbc_url", userJDBCUrl);
     config.setProperties(targetValues);
-    config.persist(false);
+    config.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", stdout.toString(), "");
   }
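Every upgrade action touched by this commit follows the same update-in-place pattern: read the desired config, mutate a copy of its properties, write the copy back, and save(). A minimal sketch of that pattern, assuming the Cluster comes from the action's execution context:

import java.util.Map;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;

public class UpgradeConfigUpdateSketch {
  public void updateProperty(Cluster cluster, String configType, String key, String value) {
    Config config = cluster.getDesiredConfigByType(configType);
    if (null == config) {
      return; // nothing to update in this cluster
    }

    // getProperties() returns a defensive copy, so mutate it and write it back.
    Map<String, String> properties = config.getProperties();
    properties.put(key, value);

    config.setProperties(properties);
    config.save(); // replaces the old persist(false): update in place, no new version
  }
}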

+ 10 - 10
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java

@@ -87,7 +87,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hadoopUser) {
         targetValues.put(RANGER_PLUGINS_HDFS_SERVICE_USER, hadoopUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HDFS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hdfs_user", HADOOP_ENV_CONFIG_TYPE);
@@ -104,7 +104,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hiveUser) {
         targetValues.put(RANGER_PLUGINS_HIVE_SERVICE_USER, hiveUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HIVE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hive_user", HIVE_ENV_CONFIG_TYPE);
@@ -121,7 +121,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != yarnUser) {
         targetValues.put(RANGER_PLUGINS_YARN_SERVICE_USER, yarnUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_YARN_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "yarn_user", YARN_ENV_CONFIG_TYPE);
@@ -138,7 +138,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != hbaseUser) {
         targetValues.put(RANGER_PLUGINS_HBASE_SERVICE_USER, hbaseUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_HBASE_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "hbase_user", HBASE_ENV_CONFIG_TYPE);
@@ -155,7 +155,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != knoxUser) {
         targetValues.put(RANGER_PLUGINS_KNOX_SERVICE_USER, knoxUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KNOX_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "knox_user", KNOX_ENV_CONFIG_TYPE);
@@ -190,7 +190,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         }
         targetValues.put(RANGER_PLUGINS_STORM_SERVICE_USER, stormValue);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_STORM_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "storm_user", STORM_ENV_CONFIG_TYPE);
@@ -207,7 +207,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != kafkaUser) {
         targetValues.put(RANGER_PLUGINS_KAFKA_SERVICE_USER, kafkaUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KAFKA_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kafka_user", KAFKA_ENV_CONFIG_TYPE);
@@ -224,7 +224,7 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
       if (null != rangerKmsUser) {
         targetValues.put(RANGER_PLUGINS_KMS_SERVICE_USER, rangerKmsUser);
         rangerAdminconfig.setProperties(targetValues);
-        rangerAdminconfig.persist(false);
+        rangerAdminconfig.save();
         sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_PLUGINS_KMS_SERVICE_USER);
       } else {
         errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "kms_user", RANGER_KMS_ENV_CONFIG_TYPE);
@@ -243,10 +243,10 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
         if (null != spnegoKeytab) {
           targetValues.put(RANGER_SPNEGO_KEYTAB, spnegoKeytab);
           rangerAdminconfig.setProperties(targetValues);
-          rangerAdminconfig.persist(false);
+          rangerAdminconfig.save();
           sucessMsg = sucessMsg + MessageFormat.format("{0}\n", RANGER_SPNEGO_KEYTAB);
         } else {
-          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);          
+          errMsg = errMsg + MessageFormat.format("{0} not found in {1}\n", "dfs.web.authentication.kerberos.keytab", HDFS_SITE_CONFIG_TYPE);
         }
 
       } else {

+ 1 - 2
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java

@@ -29,7 +29,6 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 
@@ -83,7 +82,7 @@ public class RangerKmsProxyConfig extends AbstractServerAction {
       targetValues.put(groupProp, "*");
       targetValues.put(hostProp, "*");
       kmsSite.setProperties(targetValues);
-      kmsSite.persist(false);
+      kmsSite.save();
       outputMsg = outputMsg + MessageFormat.format("Successfully added properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);
     } else {
      outputMsg = outputMsg + MessageFormat.format("Kerberos not enabled, not setting proxy properties to {0}", RANGER_KMS_SITE_CONFIG_TYPE);

+ 1 - 2
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java

@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.serveraction.AbstractServerAction;
@@ -89,7 +88,7 @@ public class SparkShufflePropertyConfig extends AbstractServerAction {
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES, newAuxServices);
       yarnSiteProperties.put(YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS, YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS_VALUE);
       yarnSiteConfig.setProperties(yarnSiteProperties);
-      yarnSiteConfig.persist(false);
+      yarnSiteConfig.save();
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set to %s",

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java

@@ -67,7 +67,7 @@ public class YarnConfigCalculation extends AbstractServerAction {
     yarnSiteProperties.put(YARN_RM_ZK_ADDRESS_PROPERTY_NAME, zkServersStr);
     yarnSiteProperties.put(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME, zkServersStr);
     yarnSiteConfig.setProperties(yarnSiteProperties);
-    yarnSiteConfig.persist(false);
+    yarnSiteConfig.save();
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
         String.format("%s was set from %s to %s. %s was set from %s to %s",

+ 1 - 21
ambari-server/src/main/java/org/apache/ambari/server/state/Config.java

@@ -30,8 +30,6 @@ public interface Config {
 
   void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes);
 
-  void setStackId(StackId stackId);
-
   /**
    * @return Config Type
    */
@@ -65,18 +63,6 @@ public interface Config {
    */
   public Map<String, Map<String, String>> getPropertiesAttributes();
 
-  /**
-   * Change the version tag
-   * @param versionTag
-   */
-  public void setTag(String versionTag);
-
-  /**
-   * Set config version
-   * @param version
-   */
-  public void setVersion(Long version);
-
   /**
    * Replace properties with new provided set
    * @param properties Property Map to replace existing one
@@ -110,11 +96,5 @@ public interface Config {
   /**
    * Persist the configuration.
    */
-  public void persist();
-
-  /**
-   * Persist the configuration, optionally creating a new config entity.
-   */
-  public void persist(boolean newConfig);
-
+  public void save();
 }

+ 17 - 3
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java

@@ -27,18 +27,20 @@ import com.google.inject.assistedinject.Assisted;
  * Factory for creating configuration objects using {@link Assisted} constructor parameters
  */
 public interface ConfigFactory {
-  
+
   /**
    * Creates a new {@link Config} object using provided values.
    *
    * @param cluster
    * @param type
+   * @param tag
    * @param map
    * @param mapAttributes
    * @return
    */
-  Config createNew(Cluster cluster, String type, Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
-  
+  Config createNew(Cluster cluster, @Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
+
   /**
    * Creates a new {@link Config} object using provided entity
    *
@@ -48,4 +50,16 @@ public interface ConfigFactory {
    */
   Config createExisting(Cluster cluster, ClusterConfigEntity entity);
 
+  /**
+   * Creates a read-only instance of a {@link Config} suitable for returning in
+   * REST responses.
+   *
+   * @param type
+   * @param tag
+   * @param map
+   * @param mapAttributes
+   * @return
+   */
+  Config createReadOnly(@Assisted("type") String type, @Assisted("tag") String tag,
+      Map<String, String> map, Map<String, Map<String, String>> mapAttributes);
 }
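The named @Assisted("type") and @Assisted("tag") bindings are what let Guice tell the two String parameters apart when matching factory methods to @AssistedInject constructors. A hedged sketch of the module wiring such a factory needs; Ambari's actual Guice module may differ:

import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ConfigImpl;

import com.google.inject.AbstractModule;
import com.google.inject.assistedinject.FactoryModuleBuilder;

public class ConfigFactoryModuleSketch extends AbstractModule {
  @Override
  protected void configure() {
    // Generates a ConfigFactory implementation; each factory method is matched
    // to the ConfigImpl constructor whose @Assisted parameters line up with
    // the method's signature.
    install(new FactoryModuleBuilder()
        .implement(Config.class, ConfigImpl.class)
        .build(ConfigFactory.class));
  }
}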

+ 225 - 255
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java

@@ -18,27 +18,29 @@
 
 package org.apache.ambari.server.state;
 
-import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.annotation.Nullable;
 
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.gson.Gson;
+import com.google.gson.JsonSyntaxException;
 import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
@@ -49,52 +51,113 @@ public class ConfigImpl implements Config {
    */
   private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
 
+  /**
+   * A label for {@link #propertyLock} to use with the {@link LockFactory}.
+   */
+  private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
+
   public static final String GENERATED_TAG_PREFIX = "generatedTag_";
 
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final long configId;
+  private final Cluster cluster;
+  private final StackId stackId;
+  private final String type;
+  private final String tag;
+  private final Long version;
 
-  private Cluster cluster;
-  private StackId stackId;
-  private String type;
-  private volatile String tag;
-  private volatile Long version;
-  private volatile Map<String, String> properties;
-  private volatile Map<String, Map<String, String>> propertiesAttributes;
-  private ClusterConfigEntity entity;
-  private volatile Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+  /**
+   * The properties of this configuration. This cannot be a
+   * {@link ConcurrentMap} since we allow null values. Therefore, it must be
+   * synchronized externally.
+   */
+  private Map<String, String> properties;
 
-  @Inject
-  private ClusterDAO clusterDAO;
+  /**
+   * A lock for reading/writing of {@link #properties} concurrently.
+   *
+   * @see #properties
+   */
+  private final ReadWriteLock propertyLock;
 
-  @Inject
-  private Gson gson;
+  /**
+   * The property attributes for this configuration.
+   */
+  private Map<String, Map<String, String>> propertiesAttributes;
+
+  private Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes;
+
+  private final ClusterDAO clusterDAO;
+
+  private final Gson gson;
 
   @Inject
   private ServiceConfigDAO serviceConfigDAO;
 
-  @Inject
-  private AmbariEventPublisher eventPublisher;
+  private final AmbariEventPublisher eventPublisher;
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted String type, @Assisted Map<String, String> properties,
-      @Assisted Map<String, Map<String, String>> propertiesAttributes, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
     this.type = type;
     this.properties = properties;
-    this.propertiesAttributes = propertiesAttributes;
+
+    // only set this if it's non-null
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    version = cluster.getNextConfigVersion(type);
+
+    // tag is nullable from factory but not in the DB, so ensure we generate something
+    tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+    this.tag = tag;
+
+    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+    ClusterConfigEntity entity = new ClusterConfigEntity();
+    entity.setClusterEntity(clusterEntity);
+    entity.setClusterId(cluster.getClusterId());
+    entity.setType(type);
+    entity.setVersion(version);
+    entity.setTag(this.tag);
+    entity.setTimestamp(System.currentTimeMillis());
+    entity.setStack(clusterEntity.getDesiredStack());
+    entity.setData(gson.toJson(properties));
+
+    if (null != propertiesAttributes) {
+      entity.setAttributes(gson.toJson(propertiesAttributes));
+    }
 
     // when creating a brand new config without a backing entity, use the
     // cluster's desired stack as the config's stack
     stackId = cluster.getDesiredStackVersion();
-
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
-  }
+    persist(entity);
 
+    configId = entity.getConfigId();
+  }
 
   @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity, Injector injector) {
+  ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity,
+      ClusterDAO clusterDAO, Gson gson, AmbariEventPublisher eventPublisher,
+      LockFactory lockFactory) {
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
     this.cluster = cluster;
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+    configId = entity.getConfigId();
+
     type = entity.getType();
     tag = entity.getTag();
     version = entity.getVersion();
@@ -102,16 +165,71 @@ public class ConfigImpl implements Config {
     // when using an existing entity, use the actual value of the entity's stack
     stackId = new StackId(entity.getStack());
 
-    this.entity = entity;
-    injector.injectMembers(this);
     propertiesTypes = cluster.getConfigPropertiesTypes(type);
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, String> deserializedProperties = gson.<Map<String, String>> fromJson(
+          entity.getData(), Map.class);
+
+      if (null == deserializedProperties) {
+        deserializedProperties = new HashMap<>();
+      }
+
+      properties = deserializedProperties;
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration JSON stored in the database for {}/{}", entity.getType(),
+          entity.getTag());
+    }
+
+    // incur the hit on deserialization since this business object is stored locally
+    try {
+      Map<String, Map<String, String>> deserializedAttributes = gson.<Map<String, Map<String, String>>> fromJson(
+          entity.getAttributes(), Map.class);
+
+      if (null != deserializedAttributes) {
+        propertiesAttributes = new HashMap<>(deserializedAttributes);
+      }
+    } catch (JsonSyntaxException e) {
+      LOG.error("Malformed configuration attribute JSON stored in the database for {}/{}",
+          entity.getType(), entity.getTag());
+    }
   }
 
   /**
-   * Constructor for clients not using factory.
+   * Constructor. This will create an instance suitable only for
+   * representation/serialization as it is incomplete.
+   *
+   * @param type
+   * @param tag
+   * @param properties
+   * @param propertiesAttributes
+   * @param clusterDAO
+   * @param gson
+   * @param eventPublisher
    */
-  public ConfigImpl(String type) {
+  @AssistedInject
+  ConfigImpl(@Assisted("type") String type,
+      @Assisted("tag") @Nullable String tag,
+      @Assisted Map<String, String> properties,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
+
+    propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
+
+    this.tag = tag;
     this.type = type;
+    this.properties = new HashMap<>(properties);
+    this.propertiesAttributes = null == propertiesAttributes ? null
+        : new HashMap<>(propertiesAttributes);
+    this.clusterDAO = clusterDAO;
+    this.gson = gson;
+    this.eventPublisher = eventPublisher;
+
+    cluster = null;
+    configId = 0;
+    version = 0L;
+    stackId = null;
   }
 
   /**
@@ -119,232 +237,124 @@ public class ConfigImpl implements Config {
    */
   @Override
   public StackId getStackId() {
-    readWriteLock.readLock().lock();
-    try {
-      return stackId;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return stackId;
   }
 
   @Override
   public Map<PropertyInfo.PropertyType, Set<String>> getPropertiesTypes() {
-    readWriteLock.readLock().lock();
-    try {
-      return propertiesTypes;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return propertiesTypes;
   }
 
   @Override
   public void setPropertiesTypes(Map<PropertyInfo.PropertyType, Set<String>> propertiesTypes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesTypes = propertiesTypes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void setStackId(StackId stackId) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.stackId = stackId;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesTypes = propertiesTypes;
   }
 
   @Override
   public String getType() {
-    readWriteLock.readLock().lock();
-    try {
-      return type;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return type;
   }
 
   @Override
   public String getTag() {
-    if (tag == null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (tag == null) {
-          tag = GENERATED_TAG_PREFIX + getVersion();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-
-      return tag;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return tag;
   }
 
   @Override
   public Long getVersion() {
-    if (version == null && cluster != null) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (version == null) {
-          version = cluster.getNextConfigVersion(type); //pure DB calculation call, no cluster locking required
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return version;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return version;
   }
 
   @Override
   public Map<String, String> getProperties() {
-    if (null != entity && null == properties) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (properties == null) {
-          properties = gson.<Map<String, String>>fromJson(entity.getData(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
+    propertyLock.readLock().lock();
     try {
-      return null == properties ? new HashMap<String, String>()
-          : new HashMap<String, String>(properties);
+      return properties == null ? new HashMap<String, String>() : new HashMap<>(properties);
     } finally {
-      readWriteLock.readLock().unlock();
+      propertyLock.readLock().unlock();
     }
-
   }
 
   @Override
   public Map<String, Map<String, String>> getPropertiesAttributes() {
-    if (null != entity && null == propertiesAttributes) {
-      readWriteLock.writeLock().lock();
-      try {
-        if (propertiesAttributes == null) {
-          propertiesAttributes = gson.<Map<String, Map<String, String>>>fromJson(entity.getAttributes(), Map.class);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    }
-
-    readWriteLock.readLock().lock();
-    try {
-      return null == propertiesAttributes ? null : new HashMap<String, Map<String, String>>(propertiesAttributes);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.tag = tag;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void setVersion(Long version) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.version = version;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    return null == propertiesAttributes ? null
+        : new HashMap<String, Map<String, String>>(propertiesAttributes);
   }
 
   @Override
   public void setProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+    propertyLock.writeLock().lock();
     try {
       this.properties = properties;
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes) {
-    readWriteLock.writeLock().lock();
-    try {
-      this.propertiesAttributes = propertiesAttributes;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    this.propertiesAttributes = propertiesAttributes;
   }
 
   @Override
-  public void updateProperties(Map<String, String> properties) {
-    readWriteLock.writeLock().lock();
+  public void updateProperties(Map<String, String> propertiesToUpdate) {
+    propertyLock.writeLock().lock();
     try {
-      this.properties.putAll(properties);
+      properties.putAll(propertiesToUpdate);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
-
   }
 
   @Override
   public List<Long> getServiceConfigVersions() {
-    readWriteLock.readLock().lock();
-    try {
-      if (cluster == null || type == null || version == null) {
-        return Collections.emptyList();
-      }
-      return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return serviceConfigDAO.getServiceConfigVersionsByConfig(cluster.getClusterId(), type, version);
   }
 
   @Override
-  public void deleteProperties(List<String> properties) {
-    readWriteLock.writeLock().lock();
+  public void deleteProperties(List<String> propertyKeysToRemove) {
+    propertyLock.writeLock().lock();
     try {
-      for (String key : properties) {
-        this.properties.remove(key);
-      }
+      Set<String> keySet = properties.keySet();
+      keySet.removeAll(propertyKeysToRemove);
     } finally {
-      readWriteLock.writeLock().unlock();
+      propertyLock.writeLock().unlock();
     }
+  }
+
+  /**
+   * Persist the entity and update the internal state relationships once the
+   * transaction has been committed.
+   */
+  private void persist(ClusterConfigEntity entity) {
+    persistEntitiesInTransaction(entity);
 
+    // ensure that the in-memory state of the cluster is kept consistent
+    cluster.addConfig(this);
+
+    // re-load the entity associations for the cluster
+    cluster.refresh();
+
+    // broadcast the change event for the configuration
+    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+        getType(), getTag(), getVersion());
+
+    eventPublisher.publish(event);
   }
 
-  @Override
-  public void persist() {
-    persist(true);
+  /**
+   * Persist the cluster and configuration entities in their own transaction.
+   */
+  @Transactional
+  void persistEntitiesInTransaction(ClusterConfigEntity entity) {
+    ClusterEntity clusterEntity = entity.getClusterEntity();
+
+    clusterDAO.createConfig(entity);
+    clusterEntity.getClusterConfigEntities().add(entity);
+
+    // save the entity, forcing a flush to ensure the refresh picks up the
+    // newest data
+    clusterDAO.merge(clusterEntity, true);
   }
 
   /**
@@ -352,69 +362,29 @@ public class ConfigImpl implements Config {
    */
   @Override
   @Transactional
-  public void persist(boolean newConfig) {
-    readWriteLock.writeLock().lock();
-    try {
-      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-      if (newConfig) {
-        ClusterConfigEntity entity = new ClusterConfigEntity();
-        entity.setClusterEntity(clusterEntity);
-        entity.setClusterId(cluster.getClusterId());
-        entity.setType(getType());
-        entity.setVersion(getVersion());
-        entity.setTag(getTag());
-        entity.setTimestamp(new Date().getTime());
-        entity.setStack(clusterEntity.getDesiredStack());
-        entity.setData(gson.toJson(getProperties()));
-
-        if (null != getPropertiesAttributes()) {
-          entity.setAttributes(gson.toJson(getPropertiesAttributes()));
-        }
-
-        clusterDAO.createConfig(entity);
-        clusterEntity.getClusterConfigEntities().add(entity);
-
-        // save the entity, forcing a flush to ensure the refresh picks up the
-        // newest data
-        clusterDAO.merge(clusterEntity, true);
-      } else {
-        // only supporting changes to the properties
-        ClusterConfigEntity entity = null;
-
-        // find the existing configuration to update
-        for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
-          if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
-              && getVersion().equals(cfe.getVersion())) {
-            entity = cfe;
-            break;
-          }
-        }
-
-        // if the configuration was found, then update it
-        if (null != entity) {
-          LOG.debug(
-              "Updating {} version {} with new configurations; a new version will not be created",
-              getType(), getVersion());
-
-          entity.setData(gson.toJson(getProperties()));
-
-          // save the entity, forcing a flush to ensure the refresh picks up the
-          // newest data
-          clusterDAO.merge(clusterEntity, true);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+  public void save() {
+    ClusterConfigEntity entity = clusterDAO.findConfig(configId);
 
-    // re-load the entity associations for the cluster
-    cluster.refresh();
+    // if the configuration was found, then update it
+    if (null != entity) {
+      ClusterEntity clusterEntity = clusterDAO.findById(entity.getClusterId());
+      LOG.debug("Updating {} version {} with new configurations; a new version will not be created",
+          getType(), getVersion());
 
-    // broadcast the change event for the configuration
-    ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
-        getType(), getTag(), getVersion());
+      entity.setData(gson.toJson(getProperties()));
+
+      // save the entity, forcing a flush to ensure the refresh picks up the
+      // newest data
+      clusterDAO.merge(clusterEntity, true);
+
+      // re-load the entity associations for the cluster
+      cluster.refresh();
+
+      // broadcast the change event for the configuration
+      ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
+          getType(), getTag(), getVersion());
 
       eventPublisher.publish(event);
+    }
   }
 }
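With the ClusterConfigEntity no longer cached on the instance, the only mutable state left in ConfigImpl is the property map, guarded by propertyLock. A self-contained sketch of the pattern, substituting a plain ReentrantReadWriteLock for Ambari's LockFactory:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class GuardedProperties {
  private final ReadWriteLock propertyLock = new ReentrantReadWriteLock();
  private Map<String, String> properties = new HashMap<>();

  public Map<String, String> getProperties() {
    propertyLock.readLock().lock();
    try {
      // Defensive copy under the read lock; callers never observe concurrent mutation.
      return new HashMap<>(properties);
    } finally {
      propertyLock.readLock().unlock();
    }
  }

  public void setProperties(Map<String, String> newProperties) {
    propertyLock.writeLock().lock();
    try {
      properties = newProperties;
    } finally {
      propertyLock.writeLock().unlock();
    }
  }
}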

+ 4 - 2
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -326,8 +326,11 @@ public class ClusterImpl implements Cluster {
     loadStackVersion();
     loadServices();
     loadServiceHostComponents();
-    loadConfigGroups();
+
+    // cache configurations before loading configuration groups
     cacheConfigurations();
+    loadConfigGroups();
+
     loadRequestExecutions();
 
     if (desiredStackVersion != null && !StringUtils.isEmpty(desiredStackVersion.getStackName()) && !
@@ -2566,7 +2569,6 @@ public class ClusterImpl implements Cluster {
           }
         }
         configGroup.setHosts(groupDesiredHosts);
-        configGroup.persist();
       } else {
         throw new IllegalArgumentException("Config group {} doesn't exist");
       }

+ 6 - 27
ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java

@@ -18,13 +18,13 @@
 
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Map;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
 
-import java.util.Map;
-
 /**
  * Configuration group or Config group is a type of Ambari resource that
  * supports grouping of configuration resources and host resources for a
@@ -80,28 +80,19 @@ public interface ConfigGroup {
   public void setDescription(String description);
 
   /**
-   * List of hosts to which configs are applied
+   * Gets an unmodifiable map of {@link Host}s.
+   *
    * @return
    */
   public Map<Long, Host> getHosts();
 
   /**
-   * List of @Config objects
+   * Gets an unmodifiable map of {@link Config}s.
+   *
    * @return
    */
   public Map<String, Config> getConfigurations();
 
-  /**
-   * Persist the Config group along with the related host and config mapping
-   * entities to the persistence store
-   */
-  void persist();
-
-  /**
-   * Persist the host mapping entity to the persistence store
-   */
-  void persistHostMapping();
-
   /**
    * Delete config group and the related host and config mapping
    * entities from the persistence store
@@ -115,13 +106,6 @@ public interface ConfigGroup {
    */
   public void addHost(Host host) throws AmbariException;
 
-  /**
-   * Add config to the config group
-   * @param config
-   * @throws AmbariException
-   */
-  public void addConfiguration(Config config) throws AmbariException;
-
   /**
    * Return @ConfigGroupResponse for the config group
    *
@@ -130,11 +114,6 @@ public interface ConfigGroup {
    */
   public ConfigGroupResponse convertToResponse() throws AmbariException;
 
-  /**
-   * Refresh Config group and the host and config mappings for the group
-   */
-  public void refresh();
-
   /**
    * Reassign the set of hosts associated with this config group
    * @param hosts

+ 25 - 9
ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java

@@ -17,22 +17,38 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
-import com.google.inject.assistedinject.Assisted;
+import java.util.Map;
+
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.configgroup.ConfigGroup;
 
-import java.util.Map;
+import com.google.inject.assistedinject.Assisted;
 
 public interface ConfigGroupFactory {
-  ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
-                       @Assisted("name") String name,
-                       @Assisted("tag") String tag,
-                       @Assisted("description") String description,
-                       @Assisted("configs") Map<String, Config> configs,
-                       @Assisted("hosts") Map<Long, Host> hosts);
+  /**
+   * Creates and saves a new {@link ConfigGroup}.
+   *
+   * @param cluster
+   * @param name
+   * @param tag
+   * @param description
+   * @param configs
+   * @param hosts
+   * @return
+   */
+  ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
 
+  /**
+   * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
+   *
+   * @param cluster
+   * @param entity
+   * @return
+   */
   ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
 }

+ 278 - 335
ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java

@@ -17,18 +17,22 @@
  */
 package org.apache.ambari.server.state.configgroup;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
 import org.apache.ambari.server.controller.internal.ConfigurationResourceProvider;
+import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
@@ -44,213 +48,195 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
 
 public class ConfigGroupImpl implements ConfigGroup {
   private static final Logger LOG = LoggerFactory.getLogger(ConfigGroupImpl.class);
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   private Cluster cluster;
-  private ConfigGroupEntity configGroupEntity;
-  private Map<Long, Host> hosts;
-  private Map<String, Config> configurations;
-  private volatile boolean isPersisted = false;
-
-  @Inject
-  private Gson gson;
-  @Inject
-  private ConfigGroupDAO configGroupDAO;
-  @Inject
-  private ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
-  @Inject
-  private ConfigGroupHostMappingDAO configGroupHostMappingDAO;
-  @Inject
-  private HostDAO hostDAO;
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  Clusters clusters;
+  private ConcurrentMap<Long, Host> m_hosts;
+  private ConcurrentMap<String, Config> m_configurations;
+  private String configGroupName;
+  private long configGroupId;
+
+  /**
+   * This lock is required to prevent inconsistencies in internal state between
+   * {@link #m_hosts} and the entities stored by the {@link ConfigGroupEntity}.
+   */
+  private final ReadWriteLock hostLock;
+
+  /**
+   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   */
+  private static final String hostLockLabel = "configurationGroupHostLock";
+
+  private final ConfigGroupDAO configGroupDAO;
+
+  private final ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
+
+  private final ConfigGroupHostMappingDAO configGroupHostMappingDAO;
+
+  private final HostDAO hostDAO;
+
+  private final ClusterDAO clusterDAO;
+
+  private final ConfigFactory configFactory;
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
-                         @Assisted("name") String name,
-                         @Assisted("tag") String tag,
-                         @Assisted("description") String description,
-                         @Assisted("configs") Map<String, Config> configs,
-                         @Assisted("hosts") Map<Long, Host> hosts,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+      @Assisted("tag") String tag, @Assisted("description") String description,
+      @Assisted("configs") Map<String, Config> configurations,
+      @Assisted("hosts") Map<Long, Host> hosts, Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupName = name;
 
-    configGroupEntity = new ConfigGroupEntity();
+    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
     configGroupEntity.setClusterId(cluster.getClusterId());
     configGroupEntity.setGroupName(name);
     configGroupEntity.setTag(tag);
     configGroupEntity.setDescription(description);
 
-    if (hosts != null) {
-      this.hosts = hosts;
-    } else {
-      this.hosts = new HashMap<Long, Host>();
-    }
+    m_hosts = hosts == null ? new ConcurrentHashMap<Long, Host>()
+        : new ConcurrentHashMap<>(hosts);
 
-    if (configs != null) {
-      configurations = configs;
-    } else {
-      configurations = new HashMap<String, Config>();
-    }
+    m_configurations = configurations == null ? new ConcurrentHashMap<String, Config>()
+        : new ConcurrentHashMap<>(configurations);
+
+    // save the entity and grab the ID
+    persist(configGroupEntity);
+    configGroupId = configGroupEntity.getGroupId();
   }
 
   @AssistedInject
-  public ConfigGroupImpl(@Assisted Cluster cluster,
-                         @Assisted ConfigGroupEntity configGroupEntity,
-                         Injector injector) {
-    injector.injectMembers(this);
+  public ConfigGroupImpl(@Assisted Cluster cluster, @Assisted ConfigGroupEntity configGroupEntity,
+      Clusters clusters, ConfigFactory configFactory,
+      ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
+      ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
+      ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+
+    this.configFactory = configFactory;
+    this.clusterDAO = clusterDAO;
+    this.hostDAO = hostDAO;
+    this.configGroupDAO = configGroupDAO;
+    this.configGroupConfigMappingDAO = configGroupConfigMappingDAO;
+    this.configGroupHostMappingDAO = configGroupHostMappingDAO;
+
+    hostLock = lockFactory.newReadWriteLock(hostLockLabel);
+
     this.cluster = cluster;
+    configGroupId = configGroupEntity.getGroupId();
+    configGroupName = configGroupEntity.getGroupName();
 
-    this.configGroupEntity = configGroupEntity;
-    configurations = new HashMap<String, Config>();
-    hosts = new HashMap<Long, Host>();
+    m_configurations = new ConcurrentHashMap<String, Config>();
+    m_hosts = new ConcurrentHashMap<Long, Host>();
 
     // Populate configs
-    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity
-      .getConfigGroupConfigMappingEntities()) {
-
+    for (ConfigGroupConfigMappingEntity configMappingEntity : configGroupEntity.getConfigGroupConfigMappingEntities()) {
       Config config = cluster.getConfig(configMappingEntity.getConfigType(),
         configMappingEntity.getVersionTag());
 
       if (config != null) {
-        configurations.put(config.getType(), config);
+        m_configurations.put(config.getType(), config);
       } else {
-        LOG.warn("Unable to find config mapping for config group"
-          + ", clusterName = " + cluster.getClusterName()
-          + ", type = " + configMappingEntity.getConfigType()
-          + ", tag = " + configMappingEntity.getVersionTag());
+        LOG.warn("Unable to find config mapping {}/{} for config group in cluster {}",
+            configMappingEntity.getConfigType(), configMappingEntity.getVersionTag(),
+            cluster.getClusterName());
       }
     }
 
     // Populate Hosts
-    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity
-      .getConfigGroupHostMappingEntities()) {
-
+    for (ConfigGroupHostMappingEntity hostMappingEntity : configGroupEntity.getConfigGroupHostMappingEntities()) {
       try {
         Host host = clusters.getHost(hostMappingEntity.getHostname());
         HostEntity hostEntity = hostMappingEntity.getHostEntity();
         if (host != null && hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
+          m_hosts.put(hostEntity.getHostId(), host);
         }
       } catch (AmbariException e) {
-        String msg = "Host seems to be deleted but Config group mapping still " +
-          "exists !";
-        LOG.warn(msg);
-        LOG.debug(msg, e);
+        LOG.warn("Host seems to be deleted but Config group mapping still exists !");
+        LOG.debug("Host seems to be deleted but Config group mapping still exists !", e);
       }
     }
-
-    isPersisted = true;
   }
 
   @Override
   public Long getId() {
-    return configGroupEntity.getGroupId();
+    return configGroupId;
   }
 
   @Override
   public String getName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getGroupName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return configGroupName;
   }
 
   @Override
   public void setName(String name) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setGroupName(name);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setGroupName(name);
+    configGroupDAO.merge(configGroupEntity);
 
+    configGroupName = name;
   }
 
   @Override
   public String getClusterName() {
-    return configGroupEntity.getClusterEntity().getClusterName();
+    return cluster.getClusterName();
   }
 
   @Override
   public String getTag() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getTag();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getTag();
   }
 
   @Override
   public void setTag(String tag) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setTag(tag);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setTag(tag);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public String getDescription() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getDescription();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getDescription();
   }
 
   @Override
   public void setDescription(String description) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setDescription(description);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setDescription(description);
+    configGroupDAO.merge(configGroupEntity);
   }
 
   @Override
   public Map<Long, Host> getHosts() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(hosts);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return Collections.unmodifiableMap(m_hosts);
   }
 
   @Override
   public Map<String, Config> getConfigurations() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(configurations);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return Collections.unmodifiableMap(m_configurations);
   }
 
   /**
@@ -259,13 +245,14 @@ public class ConfigGroupImpl implements ConfigGroup {
    */
   @Override
   public void setHosts(Map<Long, Host> hosts) {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      this.hosts = hosts;
+      // persist entities in a transaction first, then update internal state
+      replaceHostMappings(hosts);
+      m_hosts = new ConcurrentHashMap<>(hosts);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
-
   }
 
   /**
@@ -273,115 +260,140 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @param configs
    */
   @Override
-  public void setConfigurations(Map<String, Config> configs) {
-    readWriteLock.writeLock().lock();
-    try {
-      configurations = configs;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
+  public void setConfigurations(Map<String, Config> configurations) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ClusterEntity clusterEntity = configGroupEntity.getClusterEntity();
+
+    // only update the internal state after the configurations have been
+    // persisted
+    persistConfigMapping(clusterEntity, configGroupEntity, configurations);
+    m_configurations = new ConcurrentHashMap<>(configurations);
   }
 
   @Override
-  @Transactional
   public void removeHost(Long hostId) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts.containsKey(hostId)) {
-        String hostName = hosts.get(hostId).getHostName();
-        LOG.info("Removing host from config group, hostid = " + hostId + ", hostname = " + hostName);
-        hosts.remove(hostId);
-        try {
-          ConfigGroupHostMappingEntityPK hostMappingEntityPK = new
-            ConfigGroupHostMappingEntityPK();
-          hostMappingEntityPK.setHostId(hostId);
-          hostMappingEntityPK.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupHostMappingDAO.removeByPK(hostMappingEntityPK);
-        } catch (Exception e) {
-          LOG.error("Failed to delete config group host mapping"
-            + ", clusterName = " + getClusterName()
-            + ", id = " + getId()
-            + ", hostid = " + hostId
-            + ", hostname = " + hostName, e);
-          throw new AmbariException(e.getMessage());
-        }
+      Host host = m_hosts.get(hostId);
+      if (null == host) {
+        return;
       }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
 
-  @Override
-  public void persist() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!isPersisted) {
-        persistEntities();
-        refresh();
-        cluster.refresh();
-        isPersisted = true;
-      } else {
-        saveIfPersisted();
+      String hostName = host.getHostName();
+      LOG.info("Removing host (id={}, name={}) from config group", host.getHostId(), hostName);
+
+      try {
+        // remove the entities first, then update internal state
+        removeConfigGroupHostEntity(host);
+        m_hosts.remove(hostId);
+      } catch (Exception e) {
+        LOG.error("Failed to delete config group host mapping for cluster {} and host {}",
+            cluster.getClusterName(), hostName, e);
+
+        throw new AmbariException(e.getMessage());
       }
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
+  /**
+   * Removes the {@link ConfigGroupHostMappingEntity} for the specified host
+   * from this configuration group.
+   *
+   * @param host
+   *          the host to remove.
+   */
+  @Transactional
+  void removeConfigGroupHostEntity(Host host) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupHostMappingEntityPK hostMappingEntityPK = new ConfigGroupHostMappingEntityPK();
+    hostMappingEntityPK.setHostId(host.getHostId());
+    hostMappingEntityPK.setConfigGroupId(configGroupId);
+
+    ConfigGroupHostMappingEntity configGroupHostMapping = configGroupHostMappingDAO.findByPK(
+        hostMappingEntityPK);
+
+    configGroupHostMappingDAO.remove(configGroupHostMapping);
+
+    configGroupEntity.getConfigGroupHostMappingEntities().remove(configGroupHostMapping);
+    configGroupEntity = configGroupDAO.merge(configGroupEntity);
+  }
+
+  /**
+   * @param configGroupEntity
+   */
+  private void persist(ConfigGroupEntity configGroupEntity) {
+    persistEntities(configGroupEntity);
+    cluster.refresh();
+  }
+
   /**
    * Persist Config group with host mapping and configurations
    *
    * @throws Exception
    */
   @Transactional
-  void persistEntities() {
+  void persistEntities(ConfigGroupEntity configGroupEntity) {
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
     configGroupEntity.setClusterEntity(clusterEntity);
     configGroupEntity.setTimestamp(System.currentTimeMillis());
     configGroupDAO.create(configGroupEntity);
 
-    persistConfigMapping(clusterEntity);
-    persistHostMapping();
-  }
+    configGroupId = configGroupEntity.getGroupId();
 
-  // TODO: Test rollback scenario
+    persistConfigMapping(clusterEntity, configGroupEntity, m_configurations);
+    replaceHostMappings(m_hosts);
+  }
 
   /**
-   * Persist host mapping
+   * Replaces all existing host mappings with the new collection of hosts.
    *
+   * @param hosts
+   *          the new hosts
    * @throws Exception
    */
-  @Override
   @Transactional
-  public void persistHostMapping() {
-    if (isPersisted) {
-      // Delete existing mappings and create new ones
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupHostMappingEntities(new HashSet<ConfigGroupHostMappingEntity>());
-    }
+  void replaceHostMappings(Map<Long, Host> hosts) {
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+
+    // Delete existing mappings and create new ones
+    configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupHostMappingEntities(
+        new HashSet<ConfigGroupHostMappingEntity>());
 
     if (hosts != null && !hosts.isEmpty()) {
-      for (Host host : hosts.values()) {
-        HostEntity hostEntity = hostDAO.findById(host.getHostId());
-        if (hostEntity != null) {
-          ConfigGroupHostMappingEntity hostMappingEntity = new
-            ConfigGroupHostMappingEntity();
-          hostMappingEntity.setHostId(hostEntity.getHostId());
-          hostMappingEntity.setHostEntity(hostEntity);
-          hostMappingEntity.setConfigGroupEntity(configGroupEntity);
-          hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
-          configGroupEntity.getConfigGroupHostMappingEntities().add
-                  (hostMappingEntity);
-          configGroupHostMappingDAO.create(hostMappingEntity);
-        } else {
-          LOG.warn("Host seems to be deleted, cannot create host to config " +
-            "group mapping, host = " + host.getHostName());
-        }
+      configGroupEntity = persistHostMapping(hosts.values(), configGroupEntity);
+    }
+  }
+
+  /**
+   * Adds the collection of hosts to the configuration group.
+   *
+   * @param hosts
+   * @param configGroupEntity
+   */
+  @Transactional
+  ConfigGroupEntity persistHostMapping(Collection<Host> hosts,
+      ConfigGroupEntity configGroupEntity) {
+    for (Host host : hosts) {
+      HostEntity hostEntity = hostDAO.findById(host.getHostId());
+      if (hostEntity != null) {
+        ConfigGroupHostMappingEntity hostMappingEntity = new ConfigGroupHostMappingEntity();
+        hostMappingEntity.setHostId(hostEntity.getHostId());
+        hostMappingEntity.setHostEntity(hostEntity);
+        hostMappingEntity.setConfigGroupEntity(configGroupEntity);
+        hostMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
+        configGroupEntity.getConfigGroupHostMappingEntities().add(hostMappingEntity);
+        configGroupHostMappingDAO.create(hostMappingEntity);
+      } else {
+        LOG.warn(
+            "The host {} has been removed from the cluster and cannot be added to the configuration group {}",
+            host.getHostName(), configGroupName);
       }
     }
-    // TODO: Make sure this does not throw Nullpointer based on JPA docs
-    configGroupEntity = configGroupDAO.merge(configGroupEntity);
+
+    return configGroupDAO.merge(configGroupEntity);
   }
 
   /**
@@ -391,42 +403,31 @@ public class ConfigGroupImpl implements ConfigGroup {
    * @throws Exception
    */
   @Transactional
-  void persistConfigMapping(ClusterEntity clusterEntity) {
-    if (isPersisted) {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupEntity.setConfigGroupConfigMappingEntities(new HashSet<ConfigGroupConfigMappingEntity>());
-    }
+  void persistConfigMapping(ClusterEntity clusterEntity,
+      ConfigGroupEntity configGroupEntity, Map<String, Config> configurations) {
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+    configGroupEntity.setConfigGroupConfigMappingEntities(
+        new HashSet<ConfigGroupConfigMappingEntity>());
 
     if (configurations != null && !configurations.isEmpty()) {
-      for (Config config : configurations.values()) {
+      for (Entry<String, Config> entry : configurations.entrySet()) {
+        Config config = entry.getValue();
         ClusterConfigEntity clusterConfigEntity = clusterDAO.findConfig
           (cluster.getClusterId(), config.getType(), config.getTag());
 
         if (clusterConfigEntity == null) {
-          config.setVersion(cluster.getNextConfigVersion(config.getType()));
-          config.setStackId(cluster.getDesiredStackVersion());
-          // Create configuration
-          clusterConfigEntity = new ClusterConfigEntity();
-          clusterConfigEntity.setClusterId(clusterEntity.getClusterId());
-          clusterConfigEntity.setClusterEntity(clusterEntity);
-          clusterConfigEntity.setStack(clusterEntity.getDesiredStack());
-          clusterConfigEntity.setType(config.getType());
-          clusterConfigEntity.setVersion(config.getVersion());
-          clusterConfigEntity.setTag(config.getTag());
-          clusterConfigEntity.setData(gson.toJson(config.getProperties()));
-          if (null != config.getPropertiesAttributes()) {
-            clusterConfigEntity.setAttributes(gson.toJson(config.getPropertiesAttributes()));
-          }
-          clusterConfigEntity.setTimestamp(System.currentTimeMillis());
-          clusterDAO.createConfig(clusterConfigEntity);
-          clusterEntity.getClusterConfigEntities().add(clusterConfigEntity);
-          cluster.addConfig(config);
-          clusterDAO.merge(clusterEntity);
-          cluster.refresh();
+          config = configFactory.createNew(cluster, config.getType(), config.getTag(),
+              config.getProperties(), config.getPropertiesAttributes());
+
+          entry.setValue(config);
+
+          clusterConfigEntity = clusterDAO.findConfig(cluster.getClusterId(), config.getType(),
+              config.getTag());
         }
 
         ConfigGroupConfigMappingEntity configMappingEntity =
           new ConfigGroupConfigMappingEntity();
+
         configMappingEntity.setTimestamp(System.currentTimeMillis());
         configMappingEntity.setClusterId(clusterEntity.getClusterId());
         configMappingEntity.setClusterConfigEntity(clusterConfigEntity);
@@ -443,142 +444,84 @@ public class ConfigGroupImpl implements ConfigGroup {
     }
   }
 
-  void saveIfPersisted() {
-    if (isPersisted) {
-      save(clusterDAO.findById(cluster.getClusterId()));
-    }
-  }
-
-  @Transactional
-  void save(ClusterEntity clusterEntity) {
-    persistHostMapping();
-    persistConfigMapping(clusterEntity);
-  }
-
   @Override
+  @Transactional
   public void delete() {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-      configGroupDAO.removeByPK(configGroupEntity.getGroupId());
-      cluster.refresh();
-      isPersisted = false;
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    configGroupConfigMappingDAO.removeAllByGroup(configGroupId);
+    configGroupHostMappingDAO.removeAllByGroup(configGroupId);
+    configGroupDAO.removeByPK(configGroupId);
+    cluster.refresh();
   }
 
   @Override
   public void addHost(Host host) throws AmbariException {
-    readWriteLock.writeLock().lock();
+    hostLock.writeLock().lock();
     try {
-      if (hosts != null && !hosts.isEmpty()) {
-        for (Host h : hosts.values()) {
-          if (h.getHostName().equals(host.getHostName())) {
-            throw new DuplicateResourceException("Host " + h.getHostName() +
-              "is already associated with Config Group " +
-              configGroupEntity.getGroupName());
-          }
-        }
-        HostEntity hostEntity = hostDAO.findByName(host.getHostName());
-        if (hostEntity != null) {
-          hosts.put(hostEntity.getHostId(), host);
-        }
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
+      if (m_hosts.containsKey(host.getHostId())) {
+        String message = String.format(
+            "Host %s is already associated with the configuration group %s", host.getHostName(),
+            configGroupName);
 
-  @Override
-  public void addConfiguration(Config config) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      if (configurations != null && !configurations.isEmpty()) {
-        for (Config c : configurations.values()) {
-          if (c.getType().equals(config.getType()) && c.getTag().equals
-            (config.getTag())) {
-            throw new DuplicateResourceException("Config " + config.getType() +
-              " with tag " + config.getTag() + " is already associated " +
-              "with Config Group " + configGroupEntity.getGroupName());
-          }
-        }
-        configurations.put(config.getType(), config);
+        throw new DuplicateResourceException(message);
       }
+
+      // ensure that we only update the in-memory structure if the merge was
+      // successful
+      ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+      persistHostMapping(Collections.singletonList(host), configGroupEntity);
+      m_hosts.putIfAbsent(host.getHostId(), host);
     } finally {
-      readWriteLock.writeLock().unlock();
+      hostLock.writeLock().unlock();
     }
   }
 
   @Override
   public ConfigGroupResponse convertToResponse() throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
-      for (Host host : hosts.values()) {
-        Map<String, Object> hostMap = new HashMap<String, Object>();
-        hostMap.put("host_name", host.getHostName());
-        hostnames.add(hostMap);
-      }
-
-      Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
+    Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+    for (Host host : m_hosts.values()) {
+      Map<String, Object> hostMap = new HashMap<String, Object>();
+      hostMap.put("host_name", host.getHostName());
+      hostnames.add(hostMap);
+    }
 
-      for (Config config : configurations.values()) {
-        Map<String, Object> configMap = new HashMap<String, Object>();
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
-            config.getType());
-        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
-            config.getTag());
-        configObjMap.add(configMap);
-      }
+    Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
 
-      ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
-          configGroupEntity.getGroupId(), cluster.getClusterName(),
-          configGroupEntity.getGroupName(), configGroupEntity.getTag(),
-          configGroupEntity.getDescription(), hostnames, configObjMap);
-      return configGroupResponse;
-    } finally {
-      readWriteLock.readLock().unlock();
+    for (Config config : m_configurations.values()) {
+      Map<String, Object> configMap = new HashMap<String, Object>();
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+          config.getType());
+      configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+          config.getTag());
+      configObjMap.add(configMap);
     }
-  }
 
-  @Override
-  @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted) {
-        ConfigGroupEntity groupEntity = configGroupDAO.findById
-          (configGroupEntity.getGroupId());
-        configGroupDAO.refresh(groupEntity);
-        // TODO What other entities should refresh?
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+        configGroupEntity.getGroupId(), cluster.getClusterName(),
+        configGroupEntity.getGroupName(), configGroupEntity.getTag(),
+        configGroupEntity.getDescription(), hostnames, configObjMap);
+    return configGroupResponse;
   }
 
-
   @Override
   public String getServiceName() {
-    readWriteLock.readLock().lock();
-    try {
-      return configGroupEntity.getServiceName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    return configGroupEntity.getServiceName();
   }
 
   @Override
   public void setServiceName(String serviceName) {
-    readWriteLock.writeLock().lock();
-    try {
-      configGroupEntity.setServiceName(serviceName);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
+    configGroupEntity.setServiceName(serviceName);
+    configGroupDAO.merge(configGroupEntity);
+  }
 
+  /**
+   * Gets the {@link ConfigGroupEntity} by its ID from the JPA cache.
+   *
+   * @return the entity.
+   */
+  private ConfigGroupEntity getConfigGroupEntity() {
+    return configGroupDAO.findById(configGroupId);
   }
 }
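
A caller-side sketch of the new host-mapping behavior (hypothetical handling, not part of the diff): addHost(...) persists the ConfigGroupHostMappingEntity first and only updates the in-memory map once the merge succeeds, throwing DuplicateResourceException for a host already in the group.

    try {
      configGroup.addHost(host); // persists the mapping entity immediately
    } catch (DuplicateResourceException e) {
      // the host is already in the group; nothing to do
    } catch (AmbariException e) {
      // the mapping could not be persisted; in-memory state was not touched
      throw new RuntimeException(e);
    }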

+ 14 - 11
ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java

@@ -67,11 +67,10 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -91,8 +90,13 @@ public class AmbariContext {
   @Inject
   private PersistedState persistedState;
 
+  /**
+   * Used for creating read-only instances of existing {@link Config} in order
+   * to send them to the {@link ConfigGroupResourceProvider} to create
+   * {@link ConfigGroup}s.
+   */
   @Inject
-  private org.apache.ambari.server.configuration.Configuration configs;
+  ConfigFactory configFactory;
 
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
@@ -458,11 +462,13 @@ public class AmbariContext {
         SortedSet<DesiredConfig> desiredConfigsOrderedByVersion = new TreeSet<>(new Comparator<DesiredConfig>() {
           @Override
           public int compare(DesiredConfig o1, DesiredConfig o2) {
-            if (o1.getVersion() < o2.getVersion())
+            if (o1.getVersion() < o2.getVersion()) {
               return -1;
+            }
 
-            if (o1.getVersion() > o2.getVersion())
+            if (o1.getVersion() > o2.getVersion()) {
               return 1;
+            }
 
             return 0;
           }
@@ -473,9 +479,9 @@ public class AmbariContext {
        int tagMatchState = 0; // 0 -> INITIAL -> tagMatchState = 1 -> TOPOLOGY_RESOLVED -> tagMatchState = 2
 
         for (DesiredConfig config: desiredConfigsOrderedByVersion) {
-          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0)
+          if (config.getTag().equals(TopologyManager.INITIAL_CONFIG_TAG) && tagMatchState == 0) {
             tagMatchState = 1;
-          else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
+          } else if (config.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG) && tagMatchState == 1) {
             tagMatchState = 2;
             break;
           }
@@ -551,7 +557,6 @@ public class AmbariContext {
           addedHost = true;
           if (! group.getHosts().containsKey(host.getHostId())) {
             group.addHost(host);
-            group.persistHostMapping();
           }
 
         } catch (AmbariException e) {
@@ -585,9 +590,7 @@ public class AmbariContext {
     for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
       String type = entry.getKey();
       String service = stack.getServiceForConfigType(type);
-      Config config = new ConfigImpl(type);
-      config.setTag(groupName);
-      config.setProperties(entry.getValue());
+      Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
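
A sketch contrasting the two factory paths this hunk distinguishes (an assumption based on the method name; createReadOnly is expected to build a transient Config that is never written to the database):

    // Transient, read-only configuration handed to the resource provider;
    // unlike configFactory.createNew(...), nothing is persisted here.
    Config readOnly = configFactory.createReadOnly(type, groupName,
        entry.getValue(), null); // null: no property attributes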

+ 6 - 4
ambari-server/src/main/java/org/apache/ambari/server/update/HostUpdateHelper.java

@@ -53,8 +53,8 @@ import org.apache.ambari.server.orm.entities.TopologyRequestEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.commons.lang.StringUtils;
@@ -234,12 +234,12 @@ public class HostUpdateHelper {
           boolean configUpdated;
 
           // going through all cluster configs and update property values
+          ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
           for (ClusterConfigEntity clusterConfigEntity : clusterConfigEntities) {
-            ConfigImpl config = new ConfigImpl(cluster, clusterConfigEntity, injector);
+            Config config = configFactory.createExisting(cluster, clusterConfigEntity);
             configUpdated = false;
 
             for (Map.Entry<String,String> property : config.getProperties().entrySet()) {
-
               updatedPropertyValue = replaceHosts(property.getValue(), currentHostNames, hostMapping);
 
               if (updatedPropertyValue != null) {
@@ -249,8 +249,9 @@ public class HostUpdateHelper {
                 configUpdated = true;
               }
             }
+
             if (configUpdated) {
-              config.persist(false);
+              config.save();
             }
           }
         }
@@ -317,6 +318,7 @@ public class HostUpdateHelper {
   * */
   public class StringComparator implements Comparator<String> {
 
+    @Override
     public int compare(String s1, String s2) {
       return s2.length() - s1.length();
     }
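
Condensed, the host-rename update loop above becomes the following pattern (a sketch; replaceHosts(...) and the surrounding iteration are as in the file, and writing back through the entry view is an assumption since that line is not shown in this hunk):

    Config config = configFactory.createExisting(cluster, clusterConfigEntity);
    boolean configUpdated = false;
    for (Map.Entry<String, String> property : config.getProperties().entrySet()) {
      String updated = replaceHosts(property.getValue(), currentHostNames, hostMapping);
      if (updated != null) {
        property.setValue(updated); // assumption: the entry view is written back
        configUpdated = true;
      }
    }
    if (configUpdated) {
      config.save(); // replaces the former config.persist(false)
    }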

+ 4 - 13
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java

@@ -36,7 +36,6 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.StackId;
@@ -128,24 +127,16 @@ public class ExecutionCommandWrapperTest {
     CONFIG_ATTRIBUTES = new HashMap<String, Map<String,String>>();
 
     //Cluster level global config
-    Config globalConfig = configFactory.createNew(cluster1, GLOBAL_CONFIG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
-    globalConfig.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(globalConfig);
+    configFactory.createNew(cluster1, GLOBAL_CONFIG, CLUSTER_VERSION_TAG, GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Cluster level service config
-    Config serviceSiteConfigCluster = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
-    serviceSiteConfigCluster.setTag(CLUSTER_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigCluster);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, CLUSTER_VERSION_TAG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
 
     //Service level service config
-    Config serviceSiteConfigService = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
-    serviceSiteConfigService.setTag(SERVICE_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigService);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_VERSION_TAG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
 
     //Host level service config
-    Config serviceSiteConfigHost = configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
-    serviceSiteConfigHost.setTag(HOST_VERSION_TAG);
-    cluster1.addConfig(serviceSiteConfigHost);
+    configFactory.createNew(cluster1, SERVICE_SITE_CONFIG, HOST_VERSION_TAG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
 
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
 

+ 5 - 14
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java

@@ -34,8 +34,8 @@ import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.StackId;
 import org.junit.After;
@@ -103,15 +103,11 @@ public class TestActionSchedulerThreading {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String, String>>();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -122,12 +118,7 @@ public class TestActionSchedulerThreading {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

+ 1 - 5
ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java

@@ -193,11 +193,7 @@ public class HeartbeatTestHelper {
     cluster.setCurrentStackVersion(stackId);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(cluster, "cluster-env", configProperties, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
-
-    cluster.addConfig(config);
+    Config config = cf.createNew(cluster, "cluster-env", "version1", configProperties, new HashMap<String, Map<String, String>>());
     cluster.addDesiredConfig("user", Collections.singleton(config));
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());

+ 4 - 9
ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java

@@ -159,10 +159,8 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(cluster, "hadoop-env",
+    Config config = configFactory.createNew(cluster, "hadoop-env", "version1",
         new HashMap<String,String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version1");
-    cluster.addConfig(config);
     cluster.addDesiredConfig("_test", Collections.singleton(config));
 
 
@@ -243,18 +241,15 @@ public class TestHeartbeatMonitor {
     }};
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env",
+    Config hadoopEnvConfig = configFactory.createNew(cluster, "hadoop-env", "version1",
       new HashMap<String, String>() {{
         put("a", "b");
       }}, new HashMap<String, Map<String,String>>());
-    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env",
+    Config hbaseEnvConfig = configFactory.createNew(cluster, "hbase-env", "version1",
             new HashMap<String, String>() {{
               put("a", "b");
             }}, new HashMap<String, Map<String,String>>());
-    hadoopEnvConfig.setTag("version1");
-    cluster.addConfig(hadoopEnvConfig);
-    hbaseEnvConfig.setTag("version1");
-    cluster.addConfig(hbaseEnvConfig);
+
     cluster.addDesiredConfig("_test", Collections.singleton(hadoopEnvConfig));
 
 

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java

@@ -218,7 +218,7 @@ public class RecoveryConfigHelperTest {
     config.updateProperties(new HashMap<String, String>() {{
       put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "false");
     }});
-    config.persist(false);
+    config.save();
 
     // Recovery config should be stale because of the above change.
     boolean isConfigStale = recoveryConfigHelper.isConfigStale(cluster.getClusterName(), DummyHostname1,

+ 8 - 14
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java

@@ -87,8 +87,8 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
@@ -610,6 +610,7 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+    Config config = createNiceMock(Config.class);
 
     // requests
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
@@ -632,18 +633,11 @@ public class AmbariManagementControllerImplTest {
     expect(clusters.getClusterById(1L)).andReturn(cluster).anyTimes();
     expect(cluster.getClusterName()).andReturn("clusterOld").anyTimes();
     expect(cluster.getConfigPropertiesTypes(anyObject(String.class))).andReturn(Maps.<PropertyInfo.PropertyType, Set<String>>newHashMap()).anyTimes();
-    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(new ConfigImpl("config-type") {
-      @Override
-      public Map<String, Map<String, String>> getPropertiesAttributes() {
-        return Maps.newHashMap();
-      }
-
-      @Override
-      public Map<String, String> getProperties() {
-        return configReqProps;
-      }
 
-    }).anyTimes();
+    expect(config.getType()).andReturn("config-type").anyTimes();
+    expect(config.getProperties()).andReturn(configReqProps).anyTimes();
+    expect(config.getPropertiesAttributes()).andReturn(new HashMap<String,Map<String,String>>()).anyTimes();
+    expect(cluster.getDesiredConfigByType(anyObject(String.class))).andReturn(config).anyTimes();
 
     cluster.addSessionAttributes(anyObject(Map.class));
     expectLastCall().once();
@@ -652,7 +646,7 @@ public class AmbariManagementControllerImplTest {
     expectLastCall();
 
     // replay mocks
-    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    replay(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
 
     // test
     AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
@@ -660,7 +654,7 @@ public class AmbariManagementControllerImplTest {
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
-    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    verify(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager);
   }
 
   /**

+ 29 - 80
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -122,7 +122,6 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
@@ -408,7 +407,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
       tag, "", configMap, hostMap);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     return configGroup.getId();
@@ -1940,10 +1938,8 @@ public class AmbariManagementControllerTest {
     Map<String, String> properties = new HashMap<String, String>();
     Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String,String>>();
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
-    c1.setTag("v1");
-    cluster.addConfig(c1);
-    c1.persist();
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1",  properties, propertiesAttributes);
     configs.put(c1.getType(), c1);
 
     ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
@@ -1983,26 +1979,17 @@ public class AmbariManagementControllerTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, injector);
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1", properties, propertiesAttributes);
     properties.put("c", cluster1);
     properties.put("d", "d1");
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, injector);
+
+    Config c2 = configFactory.createNew(cluster, "core-site", "v1", properties, propertiesAttributes);
+    Config c3 = configFactory.createNew(cluster, "foo-site", "v1", properties, propertiesAttributes);
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
 
-    c1.setTag("v1");
-    c2.setTag("v1");
-    c3.setTag("v1");
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-
     configs.put(c1.getType(), c1);
     configs.put(c2.getType(), c2);
 
@@ -4210,27 +4197,20 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String, String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
 
-    Config config3 = cf.createNew(cluster, "yarn-site",
+    Config config3 = cf.createNew(cluster, "yarn-site", "version1",
         new HashMap<String, String>() {{
           put("test.password", "supersecret");
         }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
-    cluster.addConfig(config3);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -4383,20 +4363,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
 
@@ -4488,19 +4463,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>() {{
           put("key1", "value1");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-    config1.persist();
-    config2.persist();
 
     cluster.addConfig(config1);
     cluster.addConfig(config2);
@@ -4776,18 +4747,14 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
     config1.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr1", new HashMap<String, String>()); }});
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
     config2.setPropertiesAttributes(new HashMap<String, Map<String, String>>(){{ put("attr2", new HashMap<String, String>()); }});
 
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("_test", Collections.singleton(config1));
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
@@ -5522,11 +5489,8 @@ public class AmbariManagementControllerTest {
       configs3, null);
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "kerberos-env",
+    Config config1 = cf.createNew(cluster, "kerberos-env", "version1",
         new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    cluster.addConfig(config1);
 
     ClusterRequest crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
@@ -6448,20 +6412,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6554,20 +6513,15 @@ public class AmbariManagementControllerTest {
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
-    Config config1 = cf.createNew(cluster, "global",
+    Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = cf.createNew(cluster, "core-site",
+    Config config2 = cf.createNew(cluster, "core-site", "version1",
       new HashMap<String, String>() {{
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version1");
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
     Service mapred = cluster.addService("YARN");
@@ -6981,13 +6935,13 @@ public class AmbariManagementControllerTest {
     String group2 = getUniqueName();
     String tag2 = getUniqueName();
 
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+
     // Create Config group for core-site
     configs = new HashMap<String, String>();
     configs.put("a", "c");
     cluster = clusters.getCluster(cluster1);
-    final Config config = new ConfigImpl("core-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    final Config config = configFactory.createReadOnly("core-site", "version122", configs, null);
     Long groupId = createConfigGroup(cluster, group1, tag1,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config); }});
@@ -6998,9 +6952,7 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config2 = new ConfigImpl("mapred-site");
-    config2.setProperties(configs);
-    config2.setTag("version122");
+    final Config config2 = configFactory.createReadOnly("mapred-site", "version122", configs, null);
     groupId = createConfigGroup(cluster, group2, tag2,
       new ArrayList<String>() {{ add(host1); }},
       new ArrayList<Config>() {{ add(config2); }});
@@ -7065,7 +7017,6 @@ public class AmbariManagementControllerTest {
     ConfigGroup configGroup = cluster.getConfigGroups().get(groupId);
     configGroup.setHosts(new HashMap<Long, Host>() {{ put(3L,
       clusters.getHost(host3)); }});
-    configGroup.persist();
 
     requestId = startService(cluster1, serviceName2, false, false);
     mapredInstall = null;
@@ -7143,9 +7094,8 @@ public class AmbariManagementControllerTest {
     String group1 = getUniqueName();
     String tag1 = getUniqueName();
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
         new ArrayList<String>() {{
           add(host1);
@@ -7253,9 +7203,8 @@ public class AmbariManagementControllerTest {
     configs = new HashMap<String, String>();
     configs.put("a", "c");
 
-    final Config config = new ConfigImpl("hdfs-site");
-    config.setProperties(configs);
-    config.setTag("version122");
+    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
+    final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
     Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
       new ArrayList<String>() {{ add(host1); add(host2); }},
       new ArrayList<Config>() {{ add(config); }});
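
Taken together, the changes in this test mirror the new ConfigFactory contract: the tag moves into the factory call, and the follow-up setTag()/persist()/cluster.addConfig() bookkeeping disappears. A minimal sketch of the two flavors used above, assuming (based only on these call sites, not on the factory's Javadoc) that createNew() attaches the config to the cluster itself while createReadOnly() builds a detached one:

    import java.util.Collections;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    public class ConfigFactoryUsageSketch {
      /**
       * Attached flavor: judging by the removed setTag()/persist()/addConfig()
       * calls, createNew() now tags, persists, and registers the config in one
       * step, so only the desired-config promotion is left to the caller.
       */
      static Config attachedConfig(ConfigFactory cf, Cluster cluster,
          Map<String, String> props) throws Exception {
        Config config = cf.createNew(cluster, "core-site", "version1", props,
            Collections.<String, Map<String, String>>emptyMap());
        cluster.addDesiredConfig("_test", Collections.singleton(config));
        return config;
      }

      /**
       * Detached flavor used for config-group requests: createReadOnly()
       * builds a Config that belongs to no cluster and, inferred from the
       * name, is never persisted (its declaration is outside this excerpt).
       */
      static Config detachedConfig(ConfigFactory cf, Map<String, String> props) {
        return cf.createReadOnly("core-site", "version122", props, null);
      }
    }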

+ 5 - 9
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java

@@ -66,7 +66,7 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -108,6 +108,7 @@ public class UpgradeResourceProviderHDP22Test {
   private AmbariManagementController amc;
   private StackDAO stackDAO;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   private static final String configTagVersion1 = "version1";
   private static final String configTagVersion2 = "version2";
@@ -136,6 +137,7 @@ public class UpgradeResourceProviderHDP22Test {
     stackDAO = injector.getInstance(StackDAO.class);
     upgradeDao = injector.getInstance(UpgradeDAO.class);
     repoVersionDao = injector.getInstance(RepositoryVersionDAO.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     AmbariEventPublisher publisher = createNiceMock(AmbariEventPublisher.class);
     replay(publisher);
@@ -233,11 +235,7 @@ public class UpgradeResourceProviderHDP22Test {
       }
     }
 
-    Config config = new ConfigImpl("hive-site");
-    config.setProperties(configTagVersion1Properties);
-    config.setTag(configTagVersion1);
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "hive-site", configTagVersion1, configTagVersion1Properties, null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
     Map<String, Object> requestProps = new HashMap<String, Object>();
@@ -286,9 +284,7 @@ public class UpgradeResourceProviderHDP22Test {
     // Hive service checks have generated the ExecutionCommands by now.
     // Change the new desired config tag and verify execution command picks up new tag
     assertEquals(configTagVersion1, cluster.getDesiredConfigByType("hive-site").getTag());
-    final Config newConfig = new ConfigImpl("hive-site");
-    newConfig.setProperties(configTagVersion2Properties);
-    newConfig.setTag(configTagVersion2);
+    final Config newConfig = configFactory.createNew(cluster, "hive-site", configTagVersion2, configTagVersion2Properties, null);
     Set<Config> desiredConfigs = new HashSet<Config>() {
       {
         add(newConfig);
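
The tag-swap step above (see the "Change the new desired config tag" comment) condenses to one factory call plus one promotion. A sketch under the same assumptions as those call sites, with placeholder property contents:

    import java.util.Collections;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    public class DesiredConfigSwapSketch {
      /**
       * Creates a second hive-site version through the factory and promotes it
       * via addDesiredConfig(); afterwards
       * cluster.getDesiredConfigByType("hive-site").getTag() should report the
       * new tag, which is what the execution-command check verifies.
       */
      static void swapDesiredConfig(ConfigFactory cf, Cluster cluster,
          Map<String, String> newProps) throws Exception {
        Config newConfig = cf.createNew(cluster, "hive-site", "version2", newProps, null);
        cluster.addDesiredConfig("admin", Collections.singleton(newConfig));
      }
    }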

+ 4 - 9
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java

@@ -85,8 +85,8 @@ import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryActio
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
@@ -144,6 +144,7 @@ public class UpgradeResourceProviderTest {
   private StackDAO stackDAO;
   private AmbariMetaInfo ambariMetaInfo;
   private TopologyManager topologyManager;
+  private ConfigFactory configFactory;
 
   @Before
   public void before() throws Exception {
@@ -174,6 +175,7 @@ public class UpgradeResourceProviderTest {
 
     amc = injector.getInstance(AmbariManagementController.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    configFactory = injector.getInstance(ConfigFactory.class);
 
     Field field = AmbariServer.class.getDeclaredField("clusterController");
     field.setAccessible(true);
@@ -1046,16 +1048,9 @@ public class UpgradeResourceProviderTest {
     }
 
 
-    Config config = new ConfigImpl("zoo.cfg");
-    config.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
-    config.setTag("abcdefg");
-
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, "zoo.cfg", "abcdefg", Collections.singletonMap("a", "b"), null);
     cluster.addDesiredConfig("admin", Collections.singleton(config));
 
-
     Map<String, Object> requestProps = new HashMap<String, Object>();
     requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
     requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.0.0");
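
One small cleanup rides along here: the double-brace HashMap for zoo.cfg gives way to Collections.singletonMap. A standalone comparison of the two idioms (names are illustrative):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class SingletonMapExample {
      public static void main(String[] args) {
        // Double-brace initialization spawns a one-off anonymous HashMap subclass.
        Map<String, String> doubleBrace = new HashMap<String, String>() {{
          put("a", "b");
        }};

        // The replacement used above: a compact, immutable one-entry map.
        Map<String, String> single = Collections.singletonMap("a", "b");

        System.out.println(doubleBrace.equals(single)); // prints: true
      }
    }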

+ 7 - 12
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java

@@ -49,8 +49,7 @@ import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -113,6 +112,9 @@ public class ComponentVersionCheckActionTest {
   @Inject
   private ServiceComponentHostFactory serviceComponentHostFactory;
 
+  @Inject
+  private ConfigFactory configFactory;
+
   @Before
   public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -399,18 +401,11 @@ public class ComponentVersionCheckActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version1", properties, propertiesAttributes);
     properties.put("c", "c1");
     properties.put("d", "d1");
 
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
+    configFactory.createNew(cluster, "core-site", "version1", properties, propertiesAttributes);
+    configFactory.createNew(cluster, "foo-site", "version1", properties, propertiesAttributes);
   }
 }
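
Note that the rewritten helper keeps mutating the same properties map between factory calls. That is only equivalent to the old three-Config setup if createNew() snapshots the map it is given, which this diff does not show, or if the assertions never re-read hdfs-site's properties. A sketch of the copy-on-create behavior the test implicitly relies on, as an assumption rather than a quote from ConfigImpl:

    import java.util.HashMap;
    import java.util.Map;

    /** Illustrative only: the isolation the test above assumes. */
    public class CopyOnCreateSketch {
      private final Map<String, String> properties;

      CopyOnCreateSketch(Map<String, String> incoming) {
        // Snapshot the caller's map so later put() calls on it don't leak in.
        properties = new HashMap<>(incoming);
      }

      Map<String, String> getProperties() {
        return new HashMap<>(properties); // hand out a copy, too
      }
    }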

+ 19 - 77
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java

@@ -132,13 +132,10 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -196,16 +193,13 @@ public class ConfigureActionTest {
 
     // create a config for zoo.cfg with two values; one is a stack value and the
     // other is custom
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("tickTime", "2000");
         put("foo", "bar");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -262,16 +256,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copyIt", "10");
           put("moveIt", "10");
           put("deleteIt", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -402,15 +393,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("zoo.server.csv", "c6401,c6402,  c6403");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -468,16 +456,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("key_to_replace", "My New Cat");
         put("key_with_no_match", "WxyAndZ");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -543,16 +528,13 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("existing", "This exists!");
         put("missing", null);
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -604,16 +586,12 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("fooKey", "barValue");
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -671,7 +649,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -680,10 +658,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -769,7 +743,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("set.key.1", "s1");
         put("set.key.2", "s2");
@@ -778,10 +752,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -855,7 +825,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -865,10 +835,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -951,7 +917,7 @@ public class ConfigureActionTest {
 
     c.setCurrentStackVersion(HDP_211_STACK);
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
       {
         put("replace.key.1", "r1");
         put("replace.key.2", "r2");
@@ -961,10 +927,6 @@ public class ConfigureActionTest {
       }
     }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1041,15 +1003,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1157,15 +1116,12 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("copy.key.1", "c1");
           put("copy.key.2", "c2");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1253,17 +1209,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1362,17 +1315,15 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2",
+        new HashMap<String, String>() {{
           put("initLimit", "10");
           put("move.key.1", "m1");
           put("move.key.2", "m2");
           put("move.key.3", "m3");
           put("move.key.4", "m4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1466,17 +1417,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1567,17 +1515,14 @@ public class ConfigureActionTest {
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
     c.setDesiredStackVersion(HDP_220_STACK);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {{
           put("initLimit", "10");
           put("delete.key.1", "d1");
           put("delete.key.2", "d2");
           put("delete.key.3", "d3");
           put("delete.key.4", "d4");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version2");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
@@ -1674,15 +1619,12 @@ public class ConfigureActionTest {
     // service properties will not run!
     installService(c, "ZOOKEEPER");
 
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+    Config config = cf.createNew(c, "zoo.cfg", "version1", new HashMap<String, String>() {
       {
         put("initLimit", "10");
       }
     }, new HashMap<String, Map<String, String>>());
-    config.setTag("version1");
-    config.persist();
 
-    c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
 
     // add a host component
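
Every variant of this test now follows the same shape: the stack deploys a version1 zoo.cfg, the factory creates and registers version2, and addDesiredConfig() promotes it, which is why each assertion immediately afterwards expects two zoo.cfg versions. Condensed, under the same assumptions as the call sites above:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    public class ZooCfgVersionSketch {
      static void addSecondVersion(ConfigFactory cf, Cluster c) throws Exception {
        Map<String, String> props = new HashMap<>();
        props.put("initLimit", "10");

        // version1 already exists from the stack; this registers version2.
        Config config = cf.createNew(c, "zoo.cfg", "version2", props,
            Collections.<String, Map<String, String>>emptyMap());
        c.addDesiredConfig("user", Collections.singleton(config));
        // c.getConfigsByType("zoo.cfg").size() == 2 at this point.
      }
    }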

+ 27 - 49
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java

@@ -17,7 +17,16 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -25,18 +34,11 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.inject.Injector;
 
 /**
  * Tests FixOozieAdminUsers logic
@@ -53,52 +55,28 @@ public class FixOozieAdminUsersTest {
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("falcon_user", "falcon");
+    }};
+
+    Config falconEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(falconEnvConfig.getType()).andReturn("falcon-env").anyTimes();
+    expect(falconEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>() {{
+      put("oozie_admin_users", "oozie, oozie-admin");
+    }};
+
+    Config oozieEnvConfig = EasyMock.createNiceMock(Config.class);
+    expect(oozieEnvConfig.getType()).andReturn("oozie-env").anyTimes();
+    expect(oozieEnvConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-    Config falconEnvConfig = new ConfigImpl("falcon-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("falcon_user", "falcon");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-    Config oozieEnvConfig = new ConfigImpl("oozie-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("oozie_admin_users", "oozie, oozie-admin");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
     expect(cluster.getDesiredConfigByType("falcon-env")).andReturn(falconEnvConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("oozie-env")).andReturn(oozieEnvConfig).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
-    replay(injector, clusters);
+    replay(injector, clusters, falconEnvConfig, oozieEnvConfig);
 
     clustersField = FixOozieAdminUsers.class.getDeclaredField("clusters");
     clustersField.setAccessible(true);
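
With ConfigImpl no longer constructible from tests, the anonymous-subclass stubs above collapse into EasyMock nice mocks: only getType() and getProperties() need scripting, and every mocked Config must now appear in the replay() call. The reusable shape of that pattern, as a sketch:

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Config;

    public class MockConfigSketch {
      /** Builds a replayed Config stub exposing one type/properties pair. */
      static Config mockConfig(String type, Map<String, String> properties) {
        Config config = createNiceMock(Config.class);
        expect(config.getType()).andReturn(type).anyTimes();
        expect(config.getProperties()).andReturn(new HashMap<>(properties)).anyTimes();
        replay(config); // switch from record to replay mode before the test uses it
        return config;
      }
    }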

+ 88 - 99
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java

@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,21 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
 
 /**
  * Tests HBaseEnvMaxDirectMemorySizeAction logic
@@ -55,99 +57,86 @@ public class HBaseEnvMaxDirectMemorySizeActionTest {
     injector = EasyMock.createMock(Injector.class);
     clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
-
-    Config hbaseEnv = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content","# Set environment variables here.\n" +
-          "\n" +
-          "# The java implementation to use. Java 1.6 required.\n" +
-          "export JAVA_HOME={{java64_home}}\n" +
-          "\n" +
-          "# HBase Configuration directory\n" +
-          "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
-          "\n" +
-          "# Extra Java CLASSPATH elements. Optional.\n" +
-          "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-          "\n" +
-          "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-          "# export HBASE_HEAPSIZE=1000\n" +
-          "\n" +
-          "# Extra Java runtime options.\n" +
-          "# Below are what we set by default. May only work with SUN JVM.\n" +
-          "# For more on why as well as other possible settings,\n" +
-          "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
-          "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-          "# Uncomment below to enable java garbage collection logging.\n" +
-          "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "\n" +
-          "# Uncomment and adjust to enable JMX exporting\n" +
-          "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
-          "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
-          "#\n" +
-          "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
-          "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
-          "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
-          "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
-          "\n" +
-          "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
-          "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
-          "\n" +
-          "# Extra ssh options. Empty by default.\n" +
-          "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
-          "\n" +
-          "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
-          "export HBASE_LOG_DIR={{log_dir}}\n" +
-          "\n" +
-          "# A string representing this instance of hbase. $USER by default.\n" +
-          "# export HBASE_IDENT_STRING=$USER\n" +
-          "\n" +
-          "# The scheduling priority for daemon processes. See 'man nice'.\n" +
-          "# export HBASE_NICENESS=10\n" +
-          "\n" +
-          "# The directory where pid files are stored. /tmp by default.\n" +
-          "export HBASE_PID_DIR={{pid_dir}}\n" +
-          "\n" +
-          "# Seconds to sleep between slave commands. Unset by default. This\n" +
-          "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
-          "# otherwise arrive faster than the master can service them.\n" +
-          "# export HBASE_SLAVE_SLEEP=0.1\n" +
-          "\n" +
-          "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
-          "export HBASE_MANAGES_ZK=false\n" +
-          "\n" +
-          "{% if security_enabled %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
-          "{% else %}\n" +
-          "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
-          "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
-          "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
-          "{% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Config hbaseEnv = EasyMock.createNiceMock(Config.class);
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content","# Set environment variables here.\n" +
+        "\n" +
+        "# The java implementation to use. Java 1.6 required.\n" +
+        "export JAVA_HOME={{java64_home}}\n" +
+        "\n" +
+        "# HBase Configuration directory\n" +
+        "export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n" +
+        "\n" +
+        "# Extra Java CLASSPATH elements. Optional.\n" +
+        "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
+        "\n" +
+        "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+        "# export HBASE_HEAPSIZE=1000\n" +
+        "\n" +
+        "# Extra Java runtime options.\n" +
+        "# Below are what we set by default. May only work with SUN JVM.\n" +
+        "# For more on why as well as other possible settings,\n" +
+        "# see http://wiki.apache.org/hadoop/PerformanceTuning\n" +
+        "export SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n" +
+        "# Uncomment below to enable java garbage collection logging.\n" +
+        "# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "\n" +
+        "# Uncomment and adjust to enable JMX exporting\n" +
+        "# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n" +
+        "# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n" +
+        "#\n" +
+        "# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" +
+        "# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n" +
+        "# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n" +
+        "# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n" +
+        "\n" +
+        "# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\n" +
+        "export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n" +
+        "\n" +
+        "# Extra ssh options. Empty by default.\n" +
+        "# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n" +
+        "\n" +
+        "# Where log files are stored. $HBASE_HOME/logs by default.\n" +
+        "export HBASE_LOG_DIR={{log_dir}}\n" +
+        "\n" +
+        "# A string representing this instance of hbase. $USER by default.\n" +
+        "# export HBASE_IDENT_STRING=$USER\n" +
+        "\n" +
+        "# The scheduling priority for daemon processes. See 'man nice'.\n" +
+        "# export HBASE_NICENESS=10\n" +
+        "\n" +
+        "# The directory where pid files are stored. /tmp by default.\n" +
+        "export HBASE_PID_DIR={{pid_dir}}\n" +
+        "\n" +
+        "# Seconds to sleep between slave commands. Unset by default. This\n" +
+        "# can be useful in large clusters, where, e.g., slave rsyncs can\n" +
+        "# otherwise arrive faster than the master can service them.\n" +
+        "# export HBASE_SLAVE_SLEEP=0.1\n" +
+        "\n" +
+        "# Tell HBase whether it should manage it's own instance of Zookeeper or not.\n" +
+        "export HBASE_MANAGES_ZK=false\n" +
+        "\n" +
+        "{% if security_enabled %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
+        "{% else %}\n" +
+        "export HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}\"\n" +
+        "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\n" +
+        "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n" +
+        "{% endif %}");
+    }};
+
+    expect(hbaseEnv.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hbase-env")).andReturn(hbaseEnv).atLeastOnce();
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
 
-    replay(injector, clusters, cluster);
+    replay(injector, clusters, cluster, hbaseEnv);
 
     m_clusterField = HBaseEnvMaxDirectMemorySizeAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

+ 68 - 80
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java

@@ -17,8 +17,18 @@
  */
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.agent.CommandReport;
@@ -26,22 +36,13 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import junit.framework.Assert;
 
 /**
  * Tests HiveEnvClasspathAction logic
@@ -57,79 +58,66 @@ public class HiveEnvClasspathActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hiveEnv = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
-          "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
-          "      if [ -z \"$DEBUG\" ]; then\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
-          "      else\n" +
-          "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
-          "      fi\n" +
-          "      fi\n" +
-          "\n" +
-          "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-          "\n" +
-          "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-          "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-          "      else\n" +
-          "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-          "      fi\n" +
-          "\n" +
-          "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-          "\n" +
-          "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
-          "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
-          "      # appropriate for hive server (hwi etc).\n" +
-          "\n" +
-          "\n" +
-          "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
-          "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
-          "\n" +
-          "      # Hive Configuration Directory can be controlled by:\n" +
-          "      export HIVE_CONF_DIR=test\n" +
-          "\n" +
-          "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
-          "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-          "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-          "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-          "      fi\n" +
-          "\n" +
-          "      export METASTORE_PORT={{hive_metastore_port}}\n" +
-          "\n" +
-          "      {% if sqla_db_used or lib_dir_available %}\n" +
-          "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
-          "      {% endif %}");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("content", "      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n" +
+        "      if [ \"$SERVICE\" = \"cli\" ]; then\n" +
+        "      if [ -z \"$DEBUG\" ]; then\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n" +
+        "      else\n" +
+        "      export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
+        "      fi\n" +
+        "      fi\n" +
+        "\n" +
+        "      # The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+        "\n" +
+        "      if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+        "      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
+        "      else\n" +
+        "      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+        "      fi\n" +
+        "\n" +
+        "      export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+        "\n" +
+        "      # Larger heap size may be required when running queries over large number of files or partitions.\n" +
+        "      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n" +
+        "      # appropriate for hive server (hwi etc).\n" +
+        "\n" +
+        "\n" +
+        "      # Set HADOOP_HOME to point to a specific hadoop install directory\n" +
+        "      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n" +
+        "\n" +
+        "      # Hive Configuration Directory can be controlled by:\n" +
+        "      export HIVE_CONF_DIR=test\n" +
+        "\n" +
+        "      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n" +
+        "      if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
+        "      if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "      elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
+        "      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
+        "      fi\n" +
+        "\n" +
+        "      export METASTORE_PORT={{hive_metastore_port}}\n" +
+        "\n" +
+        "      {% if sqla_db_used or lib_dir_available %}\n" +
+        "      export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      export JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n" +
+        "      {% endif %}");
+    }};
+
+    Config hiveEnv = EasyMock.createNiceMock(Config.class);
+    expect(hiveEnv.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveEnv).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hiveEnv);
 
     m_clusterField = HiveEnvClasspathAction.class.getDeclaredField("clusters");
     m_clusterField.setAccessible(true);

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java

@@ -91,7 +91,7 @@ public class HiveZKQuorumConfigActionTest {
     m_hiveSiteConfig.setProperties(EasyMock.anyObject(Map.class));
     EasyMock.expectLastCall().once();
 
-    m_hiveSiteConfig.persist(false);
+    m_hiveSiteConfig.save();
     EasyMock.expectLastCall().once();
 
     EasyMock.expect(m_cluster.getDesiredConfigByType(HiveZKQuorumConfigAction.HIVE_SITE_CONFIG_TYPE)).andReturn(m_hiveSiteConfig).atLeastOnce();
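
The one behavioral rename in this file: Config.persist(boolean) becomes Config.save(). On the production side the pattern presumably looks like the following hypothetical action body; only the individual calls (getDesiredConfigByType, getProperties, setProperties, save) are taken from this diff:

    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;

    public class SaveConfigSketch {
      /** Hypothetical helper: mutate the desired hive-site config, then save(). */
      static void updateHiveSite(Cluster cluster, Map<String, String> updates) {
        Config hiveSite = cluster.getDesiredConfigByType("hive-site");
        Map<String, String> properties = hiveSite.getProperties();
        properties.putAll(updates);
        hiveSite.setProperties(properties);
        hiveSite.save(); // formerly persist(false)
      }
    }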

+ 7 - 21
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java

@@ -36,7 +36,6 @@ import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMock;
@@ -65,26 +64,13 @@ public class KerberosKeytabsActionTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     m_kerberosHelper = EasyMock.createMock(KerberosHelper.class);
 
-    m_kerberosConfig = new ConfigImpl("kerberos-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kerberos-env", "");
-      }};
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("kerberos-env", "");
+    }};
 
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    m_kerberosConfig = EasyMock.createNiceMock(Config.class);
+    expect(m_kerberosConfig.getType()).andReturn("kerberos-env").anyTimes();
+    expect(m_kerberosConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
@@ -92,7 +78,7 @@ public class KerberosKeytabsActionTest {
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
 
-    replay(m_clusters, cluster);
+    replay(m_clusters, cluster, m_kerberosConfig);
 
     m_injector = Guice.createInjector(new AbstractModule() {
 

+ 22 - 50
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java

@@ -35,7 +35,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -57,54 +56,27 @@ public class RangerConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config adminConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("DB_FLAVOR", "MYSQL");
-        put("db_host", "host1");
-        put("db_name", "ranger");
-        put("audit_db_name", "ranger_audit");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("admin-properties") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("DB_FLAVOR", "MYSQL");
+      put("db_host", "host1");
+      put("db_name", "ranger");
+      put("audit_db_name", "ranger_audit");
+    }};
+
+    Config adminConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    mockProperties = new HashMap<String, String>();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("admin-properties").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
+
 
     expect(cluster.getDesiredConfigByType("admin-properties")).andReturn(adminConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("ranger-admin-site")).andReturn(adminSiteConfig).atLeastOnce();
@@ -113,7 +85,7 @@ public class RangerConfigCalculationTest {
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, adminConfig, adminSiteConfig, rangerEnv);
 
     m_clusterField = RangerConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

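One detail of the rewritten setup above is easy to miss: after adminConfig is stubbed, the local mockProperties variable is reassigned to a fresh empty map, and both adminSiteConfig and rangerEnv hand back that same instance. This is harmless while the test only reads the maps, but giving each mock its own map avoids accidental coupling; a sketch of the safer stubbing:

    // One independent map per mock, so a mutation made through one config
    // can never surface in the other (both start empty, as in the test).
    expect(adminSiteConfig.getProperties())
        .andReturn(new HashMap<String, String>()).anyTimes();
    expect(rangerEnv.getProperties())
        .andReturn(new HashMap<String, String>()).anyTimes();
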
+ 50 - 123
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java

@@ -25,6 +25,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -34,9 +35,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -59,124 +59,50 @@ public class RangerKerberosConfigCalculationTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config hadoopConfig = new ConfigImpl("hadoop-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hdfs_user", "hdfs");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-
-    Config hiveConfig = new ConfigImpl("hive-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hive_user", "hive");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config yarnConfig = new ConfigImpl("yarn-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn_user", "yarn");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hbaseConfig = new ConfigImpl("hbase-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("hbase_user", "hbase");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config knoxConfig = new ConfigImpl("knox-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("knox_user", "knox");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config stormConfig = new ConfigImpl("storm-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("storm_user", "storm");
-        put("storm_principal_name", "storm-c1@EXAMLE.COM");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kafkaConfig = new ConfigImpl("kafka-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kafka_user", "kafka");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config kmsConfig = new ConfigImpl("kms-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("kms_user", "kms");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config hdfsSiteConfig = new ConfigImpl("hdfs-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-    };
-
-    Config adminSiteConfig = new ConfigImpl("ranger-admin-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config hadoopConfig = EasyMock.createNiceMock(Config.class);
+    expect(hadoopConfig.getType()).andReturn("hadoop-env").anyTimes();
+    expect(hadoopConfig.getProperties()).andReturn(Collections.singletonMap("hdfs_user", "hdfs")).anyTimes();
+
+    Config hiveConfig = EasyMock.createNiceMock(Config.class);
+    expect(hiveConfig.getType()).andReturn("hive-env").anyTimes();
+    expect(hiveConfig.getProperties()).andReturn(Collections.singletonMap("hive_user", "hive")).anyTimes();
+
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-env").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(Collections.singletonMap("yarn_user", "yarn")).anyTimes();
+
+    Config hbaseConfig = EasyMock.createNiceMock(Config.class);
+    expect(hbaseConfig.getType()).andReturn("hbase-env").anyTimes();
+    expect(hbaseConfig.getProperties()).andReturn(Collections.singletonMap("hbase_user", "hbase")).anyTimes();
+
+    Config knoxConfig = EasyMock.createNiceMock(Config.class);
+    expect(knoxConfig.getType()).andReturn("knox-env").anyTimes();
+    expect(knoxConfig.getProperties()).andReturn(Collections.singletonMap("knox_user", "knox")).anyTimes();
+
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("storm_user", "storm");
+      put("storm_principal_name", "storm-c1@EXAMLE.COM");
+    }};
+
+    Config stormConfig = EasyMock.createNiceMock(Config.class);
+    expect(stormConfig.getType()).andReturn("storm-env").anyTimes();
+    expect(stormConfig.getProperties()).andReturn(mockProperties).anyTimes();
+
+    Config kafkaConfig = EasyMock.createNiceMock(Config.class);
+    expect(kafkaConfig.getType()).andReturn("kafka-env").anyTimes();
+    expect(kafkaConfig.getProperties()).andReturn(Collections.singletonMap("kafka_user", "kafka")).anyTimes();
+
+    Config kmsConfig = EasyMock.createNiceMock(Config.class);
+    expect(kmsConfig.getType()).andReturn("kms-env").anyTimes();
+    expect(kmsConfig.getProperties()).andReturn(Collections.singletonMap("kms_user", "kms")).anyTimes();
+
+    Config hdfsSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(hdfsSiteConfig.getType()).andReturn("hdfs-site").anyTimes();
+    expect(hdfsSiteConfig.getProperties()).andReturn(Collections.singletonMap("dfs.web.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.kytab")).anyTimes();
+
+    Config adminSiteConfig = EasyMock.createNiceMock(Config.class);
+    expect(adminSiteConfig.getType()).andReturn("ranger-admin-site").anyTimes();
+    expect(adminSiteConfig.getProperties()).andReturn(new HashMap<String,String>()).anyTimes();
 
     expect(cluster.getDesiredConfigByType("hadoop-env")).andReturn(hadoopConfig).atLeastOnce();
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(hiveConfig).atLeastOnce();
@@ -193,7 +119,8 @@ public class RangerKerberosConfigCalculationTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, hadoopConfig, hiveConfig, yarnConfig, hbaseConfig,
+        knoxConfig, stormConfig, kafkaConfig, kmsConfig, hdfsSiteConfig, adminSiteConfig);
 
     m_clusterField = RangerKerberosConfigCalculation.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
@@ -236,7 +163,7 @@ public class RangerKerberosConfigCalculationTest {
     assertTrue(map.containsKey("ranger.plugins.storm.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kafka.serviceuser"));
     assertTrue(map.containsKey("ranger.plugins.kms.serviceuser"));
-    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));    
+    assertTrue(map.containsKey("ranger.spnego.kerberos.keytab"));
 
 
     assertEquals("hdfs", map.get("ranger.plugins.hdfs.serviceuser"));
@@ -254,4 +181,4 @@ public class RangerKerberosConfigCalculationTest {
 
   }
 
-} 
+}

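Collections.singletonMap keeps the one-property stubs above compact, but the returned map is immutable, so it only suits configs the action reads and never mutates in place. A self-contained illustration of that Java behavior:

    import java.util.Collections;
    import java.util.Map;

    public class SingletonMapNote {
      public static void main(String[] args) {
        Map<String, String> props = Collections.singletonMap("hdfs_user", "hdfs");
        System.out.println(props.get("hdfs_user")); // reads are fine: prints "hdfs"
        props.put("other", "x"); // throws UnsupportedOperationException
      }
    }
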
+ 10 - 26
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java

@@ -34,9 +34,8 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -55,34 +54,19 @@ public class RangerKmsProxyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
-    Config rangerEnv = new ConfigImpl("ranger-env") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
+    Map<String, String> mockProperties = new HashMap<String, String>() {
+      {
         put("ranger_user", "ranger");
-      }};
-
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
       }
     };
 
-    Config kmsSite = new ConfigImpl("kms-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>();
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
-
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
+    Config rangerEnv = EasyMock.createNiceMock(Config.class);
+    expect(rangerEnv.getType()).andReturn("ranger-env").anyTimes();
+    expect(rangerEnv.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
+    Config kmsSite = EasyMock.createNiceMock(Config.class);
+    expect(kmsSite.getType()).andReturn("kms-site").anyTimes();
+    expect(kmsSite.getProperties()).andReturn(mockProperties).anyTimes();
 
     expect(cluster.getDesiredConfigByType("ranger-env")).andReturn(rangerEnv).atLeastOnce();
     expect(cluster.getDesiredConfigByType("kms-site")).andReturn(kmsSite).atLeastOnce();
@@ -90,7 +74,7 @@ public class RangerKmsProxyConfigTest {
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
     expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
 
-    replay(m_injector, m_clusters, cluster);
+    replay(m_injector, m_clusters, cluster, rangerEnv, kmsSite);
 
     m_clusterField = RangerKmsProxyConfig.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);

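Here the same mockProperties instance backs both the ranger-env and kms-site mocks, which presumably works because the action writes through Config.setProperties rather than mutating the returned map. When a test needs to assert on those writes, an EasyMock Capture is a natural fit; a sketch, not part of the commit (the asserted key is illustrative):

    import java.util.Map;

    import org.easymock.Capture;
    import org.easymock.EasyMock;

    // Record whatever the action hands to kms-site, then assert on the
    // captured map after the action has executed.
    Capture<Map<String, String>> written = EasyMock.newCapture();
    kmsSite.setProperties(EasyMock.capture(written));
    EasyMock.expectLastCall().anyTimes();
    // ... run the action, then for example:
    // assertTrue(written.getValue().containsKey("hadoop.kms.proxyuser.ranger.users"));
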
+ 8 - 22
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java

@@ -36,7 +36,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,33 +57,20 @@ public class SparkShufflePropertyConfigTest {
     m_clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
 
+    Map<String, String> mockProperties = new HashMap<String, String>() {{
+      put("yarn.nodemanager.aux-services", "some_service");
+    }};
 
-    Config adminConfig = new ConfigImpl("yarn-site") {
-      Map<String, String> mockProperties = new HashMap<String, String>() {{
-        put("yarn.nodemanager.aux-services", "some_service");
-      }};
-      @Override
-      public Map<String, String> getProperties() {
-        return mockProperties;
-      }
+    Config yarnConfig = EasyMock.createNiceMock(Config.class);
+    expect(yarnConfig.getType()).andReturn("yarn-site").anyTimes();
+    expect(yarnConfig.getProperties()).andReturn(mockProperties).anyTimes();
 
-      @Override
-      public void setProperties(Map<String, String> properties) {
-        mockProperties.putAll(properties);
-      }
-
-      @Override
-      public void persist(boolean newConfig) {
-        // no-op
-      }
-    };
-
-    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(adminConfig).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(yarnConfig).atLeastOnce();
 
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
     expect(m_injector.getInstance(Clusters.class)).andReturn(m_clusters).atLeastOnce();
 
-    replay(m_injector, m_clusters);
+    replay(m_injector, m_clusters, yarnConfig);
 
     clusterField = SparkShufflePropertyConfig.class.getDeclaredField("clusters");
     clusterField.setAccessible(true);

+ 14 - 14
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java

@@ -67,7 +67,7 @@ import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -153,6 +153,8 @@ public class UpgradeActionTest {
   private AmbariMetaInfo ambariMetaInfo;
   @Inject
   private FinalizeUpgradeAction finalizeUpgradeAction;
+  @Inject
+  private ConfigFactory configFactory;
 
   @Before
   public void setup() throws Exception {
@@ -1043,24 +1045,22 @@ public class UpgradeActionTest {
     properties.put("a", "a1");
     properties.put("b", "b1");
 
-    Config c1 = new ConfigImpl(cluster, "zookeeper-env", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "zookeeper-env", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("zookeeper_a", "value_1");
     properties.put("zookeeper_b", "value_2");
 
-    Config c2 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    configFactory.createNew(cluster, "hdfs-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
     properties.put("hdfs_a", "value_3");
     properties.put("hdfs_b", "value_4");
 
-    Config c3 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
-    Config c4 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
-
-    cluster.addConfig(c1);
-    cluster.addConfig(c2);
-    cluster.addConfig(c3);
-    cluster.addConfig(c4);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-    c4.persist();
+    configFactory.createNew(cluster, "core-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
+
+    configFactory.createNew(cluster, "foo-site", "version-" + System.currentTimeMillis(),
+        properties, propertiesAttributes);
   }
 }

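This file shows the other half of the refactoring: the old multi-step sequence (construct ConfigImpl, setTag, cluster.addConfig, persist) collapses into a single factory call, and the removed addConfig/persist lines imply createNew both persists the config and attaches it to the cluster. A sketch under that assumption:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.ConfigFactory;

    public class CreateNewSketch {
      // createNew(cluster, type, tag, properties, attributes) replaces:
      //   Config c = new ConfigImpl(cluster, type, properties, attrs, injector);
      //   c.setTag(tag); cluster.addConfig(c); c.persist();
      static Config zookeeperEnv(ConfigFactory configFactory, Cluster cluster,
          Map<String, String> properties) {
        return configFactory.createNew(cluster, "zookeeper-env",
            "version-" + System.currentTimeMillis(), properties,
            new HashMap<String, Map<String, String>>());
      }
    }
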
+ 12 - 14
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java

@@ -89,8 +89,7 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("a", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = configFactory.createNew(cluster, "hdfs-site", properties, propertiesAttributes);
-    config.setTag("testversion");
+    Config config = configFactory.createNew(cluster, "hdfs-site", "testversion", properties, propertiesAttributes);
 
     Host host = clusters.getHost("h1");
 
@@ -103,7 +102,6 @@ public class ConfigGroupTest {
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "cg-test",
       "HDFS", "New HDFS configs for h1", configs, hosts);
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
     return configGroup;
   }
@@ -154,28 +152,28 @@ public class ConfigGroupTest {
     Map<String, String> attributes = new HashMap<String, String>();
     attributes.put("key1", "true");
     propertiesAttributes.put("final", attributes);
-    Config config = new ConfigImpl("test-site");
-    config.setProperties(properties);
-    config.setPropertiesAttributes(propertiesAttributes);
-    config.setTag("version100");
 
-    configGroup.addConfiguration(config);
+    Config config = configFactory.createNew(cluster, "test-site", "version100", properties, propertiesAttributes);
+    Map<String, Config> newConfigurations = new HashMap<>(configGroup.getConfigurations());
+    newConfigurations.put(config.getType(), config);
+
+    configGroup.setConfigurations(newConfigurations);
     Assert.assertEquals(2, configGroup.getConfigurations().values().size());
 
+    // re-request it and verify that the config was added
+    configGroupEntity = configGroupDAO.findById(configGroup.getId());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
+
     configGroup.setName("NewName");
     configGroup.setDescription("NewDesc");
     configGroup.setTag("NewTag");
 
     // Save
-    configGroup.persist();
-    configGroup.refresh();
     configGroupEntity = configGroupDAO.findByName("NewName");
 
     Assert.assertNotNull(configGroupEntity);
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupHostMappingEntities().size());
-    Assert.assertEquals(2, configGroupEntity
-      .getConfigGroupConfigMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupHostMappingEntities().size());
+    Assert.assertEquals(2, configGroupEntity.getConfigGroupConfigMappingEntities().size());
     Assert.assertEquals("NewTag", configGroupEntity.getTag());
     Assert.assertEquals("NewDesc", configGroupEntity.getDescription());
     Assert.assertNotNull(cluster.getConfig("test-site", "version100"));

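ConfigGroup edits follow the same shape: addConfiguration plus an explicit persist()/refresh() pair becomes a single setConfigurations call, and the test's immediate DAO re-query suggests the setter now saves the mapping as a side effect. The replacement idiom, in sketch form:

    // Copy the current configurations, add the new entry, and hand the
    // whole map back; no separate persist() or refresh() round-trip.
    Map<String, Config> updated = new HashMap<>(configGroup.getConfigurations());
    updated.put(config.getType(), config);
    configGroup.setConfigurations(updated);
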
+ 23 - 26
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java

@@ -38,7 +38,6 @@ import javax.persistence.EntityManager;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.ClusterRequest;
@@ -88,6 +87,7 @@ public class ConfigHelperTest {
     private static ConfigHelper configHelper;
     private static AmbariManagementController managementController;
     private static AmbariMetaInfo metaInfo;
+    private static ConfigFactory configFactory;
 
     @BeforeClass
     public static void setup() throws Exception {
@@ -102,6 +102,7 @@ public class ConfigHelperTest {
       configHelper = injector.getInstance(ConfigHelper.class);
       managementController = injector.getInstance(AmbariManagementController.class);
       metaInfo = injector.getInstance(AmbariMetaInfo.class);
+      configFactory = injector.getInstance(ConfigFactory.class);
 
       clusterName = "c1";
       clusters.addCluster(clusterName, new StackId("HDP-2.0.6"));
@@ -251,7 +252,6 @@ public class ConfigHelperTest {
       LOG.info("Config group created with tag " + tag);
       configGroup.setTag(tag);
 
-      configGroup.persist();
       cluster.addConfigGroup(configGroup);
 
       return configGroup.getId();
@@ -339,14 +339,11 @@ public class ConfigHelperTest {
         add(clusterRequest6);
       }}, null);
 
-      final Config config = new ConfigImpl("ams-env");
-      config.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config.setProperties(properties);
 
+      final Config config = configFactory.createNew(cluster, "ams-env", "version122", properties, null);
       Long groupId = addConfigGroup("g1", "t1", new ArrayList<String>() {{
         add("h1");
       }}, new ArrayList<Config>() {{
@@ -419,19 +416,14 @@ public class ConfigHelperTest {
         add(clusterRequest3);
       }}, null);
 
-      final Config config1 = new ConfigImpl("core-site2");
-      config1.setTag("version122");
-
       Map<String, String> properties = new HashMap<String, String>();
       properties.put("a", "b");
       properties.put("c", "d");
-      config1.setProperties(properties);
+      final Config config1 = configFactory.createNew(cluster, "core-site2", "version122", properties, null);
 
-      final Config config2 = new ConfigImpl("global2");
-      config2.setTag("version122");
       Map<String, String> properties2 = new HashMap<String, String>();
       properties2.put("namenode_heapsize", "1111");
-      config2.setProperties(properties2);
+      final Config config2 = configFactory.createNew(cluster, "global2", "version122", properties2, null);
 
       Long groupId = addConfigGroup("g2", "t1", new ArrayList<String>() {{
         add("h1");
@@ -511,24 +503,23 @@ public class ConfigHelperTest {
       }}, null);
 
 
-      final Config config1 = new ConfigImpl("core-site3");
-      config1.setTag("version122");
-
       Map<String, String> attributes = new HashMap<String, String>();
       attributes.put("fs.trash.interval", "11");
       attributes.put("b", "y");
       Map<String, Map<String, String>> config1Attributes = new HashMap<String, Map<String, String>>();
       config1Attributes.put("attribute1", attributes);
-      config1.setPropertiesAttributes(config1Attributes);
 
-      final Config config2 = new ConfigImpl("global3");
-      config2.setTag("version122");
+      final Config config1 = configFactory.createNew(cluster, "core-site3", "version122",
+          new HashMap<String, String>(), config1Attributes);
+
       attributes = new HashMap<String, String>();
       attributes.put("namenode_heapsize", "z");
       attributes.put("c", "q");
       Map<String, Map<String, String>> config2Attributes = new HashMap<String, Map<String, String>>();
       config2Attributes.put("attribute2", attributes);
-      config2.setPropertiesAttributes(config2Attributes);
+
+      final Config config2 = configFactory.createNew(cluster, "global3", "version122",
+          new HashMap<String, String>(), config2Attributes);
 
       Long groupId = addConfigGroup("g3", "t1", new ArrayList<String>() {{
         add("h3");
@@ -690,7 +681,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -718,7 +710,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -744,7 +737,8 @@ public class ConfigHelperTest {
       confGroupProperties.put("b", "any");
       confGroupProperties.put("c", "any");
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", confGroupProperties, null, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", null,
+          confGroupProperties, null);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -772,7 +766,8 @@ public class ConfigHelperTest {
       confGroupFinalAttrs.put("b", "true");
       confGroupAttributes.put("final", confGroupFinalAttrs);
 
-      Config overrideConfig = new ConfigImpl(cluster, "type", null, confGroupAttributes, injector);
+      Config overrideConfig = configFactory.createNew(cluster, "type", "version122",
+          new HashMap<String,String>(), confGroupAttributes);
 
       Map<String, Map<String, String>> result
           = configHelper.overrideAttributes(overrideConfig, persistedAttributes);
@@ -921,8 +916,10 @@ public class ConfigHelperTest {
       List<String> hosts = new ArrayList<String>();
       hosts.add("h1");
       List<Config> configs = new ArrayList<Config>();
-      ConfigImpl configImpl = new ConfigImpl("flume-conf");
-      configImpl.setTag("FLUME1");
+
+      Config configImpl = configFactory.createNew(cluster, "flume-conf", "FLUME1",
+          new HashMap<String,String>(), null);
+
       configs.add(configImpl);
       addConfigGroup("configGroup1", "FLUME", hosts, configs);
 

+ 2 - 6
ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java

@@ -56,12 +56,12 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
-import org.junit.experimental.categories.Category;
 
 /**
  * Tests the {@link AlertReceivedListener}.
@@ -835,17 +835,13 @@ public class AlertReceivedListenerTest {
   @SuppressWarnings("serial")
   public void testAlertFirmnessUsingGlobalValueHigherThanOverride() throws Exception {
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV,
+    Config config = cf.createNew(m_cluster, ConfigHelper.CLUSTER_ENV, "version2",
         new HashMap<String, String>() {
           {
             put(ConfigHelper.CLUSTER_ENV_ALERT_REPEAT_TOLERANCE, "3");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    config.setTag("version2");
-    config.persist();
-
-    m_cluster.addConfig(config);
     m_cluster.addDesiredConfig("user", Collections.singleton(config));
 
     String definitionName = ALERT_DEFINITION + "1";

+ 7 - 10
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java

@@ -124,14 +124,11 @@ public class ClusterDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", "version1", new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
-    config1.persist();
-    config2.persist();
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
+
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     // 100 hosts
@@ -186,7 +183,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -221,7 +218,7 @@ public class ClusterDeadlockTest {
     }
 
     DeadlockWarningThread wt = new DeadlockWarningThread(threads);
-    
+
     while (true) {
       if(!wt.isAlive()) {
           break;
@@ -267,7 +264,7 @@ public class ClusterDeadlockTest {
       clusterWriterThread.start();
       schWriterThread.start();
     }
-    
+
     DeadlockWarningThread wt = new DeadlockWarningThread(threads, 20, 1000);
     while (true) {
       if(!wt.isAlive()) {
@@ -337,7 +334,7 @@ public class ClusterDeadlockTest {
     @Override
     public void run() {
       for (int i =0; i<300; i++) {
-        config.persist(false);
+        config.save();
       }
     }
   }

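The writer thread above also picks up the rename of persist(false) to save(); the boolean meant "update the existing row rather than insert a new one", and save() appears to keep that update-in-place behavior. A sketch of the mutate-then-save flow (the property key is illustrative):

    // Mutate the in-memory properties, then write the same config row back.
    Map<String, String> properties = config.getProperties();
    properties.put("some.property", "updated");
    config.setProperties(properties);
    config.save(); // formerly config.persist(false)
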
+ 32 - 101
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java

@@ -87,7 +87,6 @@ import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
@@ -964,21 +963,14 @@ public class ClusterTest {
     Map<String, Map<String, String>> c2PropAttributes = new HashMap<String, Map<String,String>>();
     c2PropAttributes.put("final", new HashMap<String, String>());
     c2PropAttributes.get("final").put("x", "true");
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, c1PropAttributes);
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, c2PropAttributes);
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     c1.addDesiredConfig("_test", Collections.singleton(config1));
     Config res = c1.getDesiredConfigByType("global");
@@ -998,21 +990,14 @@ public class ClusterTest {
   public void testDesiredConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "global",
+    Config config1 = configFactory.createNew(c1, "global", "version1",
         new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "global",
+    Config config2 = configFactory.createNew(c1, "global", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
-    Config config3 = configFactory.createNew(c1, "core-site",
+    Config config3 = configFactory.createNew(c1, "core-site", "version2",
         new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config3.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
-    c1.addConfig(config3);
 
     try {
       c1.addDesiredConfig(null, Collections.singleton(config1));
@@ -1132,18 +1117,11 @@ public class ClusterTest {
 
     c1.addService("HDFS");
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    config1.persist();
-    c1.addConfig(config1);
-    config2.persist();
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1209,10 +1187,9 @@ public class ClusterTest {
     Map<String, Map<String, String>> propAttributes = new HashMap<String, Map<String,String>>();
     propAttributes.put("final", new HashMap<String, String>());
     propAttributes.get("final").put("test", "true");
-    Config config = configFactory.createNew(c1, "hdfs-site", new HashMap<String, String>(){{
+    Config config = configFactory.createNew(c1, "hdfs-site", "1", new HashMap<String, String>(){{
       put("test", "test");
     }}, propAttributes);
-    config.setTag("1");
 
     host1.addDesiredConfig(c1.getClusterId(), true, "test", config);
 
@@ -1247,16 +1224,11 @@ public class ClusterTest {
   public void testServiceConfigVersions() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     c1.addDesiredConfig("admin", Collections.singleton(config1));
     List<ServiceConfigVersionResponse> serviceConfigVersions =
@@ -1310,16 +1282,11 @@ public class ClusterTest {
   public void testSingleServiceVersionForMultipleConfigs() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
 
-    Config config2 = configFactory.createNew(c1, "core-site",
+    Config config2 = configFactory.createNew(c1, "core-site", "version2",
       new HashMap<String, String>() {{ put("x", "y"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
-
-    c1.addConfig(config1);
-    c1.addConfig(config2);
 
     Set<Config> configs = new HashSet<Config>();
     configs.add(config1);
@@ -1345,11 +1312,8 @@ public class ClusterTest {
   public void testServiceConfigVersionsForGroups() throws Exception {
     createDefaultCluster();
 
-    Config config1 = configFactory.createNew(c1, "hdfs-site",
+    Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("version1");
-
-    c1.addConfig(config1);
 
     ServiceConfigVersionResponse scvResponse =
       c1.addDesiredConfig("admin", Collections.singleton(config1));
@@ -1361,16 +1325,13 @@ public class ClusterTest {
     Assert.assertEquals("Only one scv should be active", 1, activeServiceConfigVersions.get("HDFS").size());
 
     //create config group
-    Config config2 = configFactory.createNew(c1, "hdfs-site",
+    Config config2 = configFactory.createNew(c1, "hdfs-site", "version2",
       new HashMap<String, String>() {{ put("a", "c"); }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("version2");
 
     ConfigGroup configGroup =
       configGroupFactory.createNew(c1, "test group", "HDFS", "descr", Collections.singletonMap("hdfs-site", config2),
         Collections.<Long, Host>emptyMap());
 
-    configGroup.persist();
-
     c1.addConfigGroup(configGroup);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1381,12 +1342,11 @@ public class ClusterTest {
     Assert.assertEquals("Two service config versions should be active, for default and test groups",
       2, activeServiceConfigVersions.get("HDFS").size());
 
-    Config config3 = configFactory.createNew(c1, "hdfs-site",
+    Config config3 = configFactory.createNew(c1, "hdfs-site", "version3",
       new HashMap<String, String>() {{ put("a", "d"); }}, new HashMap<String, Map<String,String>>());
 
     configGroup.setConfigurations(Collections.singletonMap("hdfs-site", config3));
 
-    configGroup.persist();
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
     assertEquals("SCV 3 should be created", Long.valueOf(3), scvResponse.getVersion());
 
@@ -1417,16 +1377,14 @@ public class ClusterTest {
 
     //check config with empty cluster
 
-    Config config4 = new ConfigImpl("hdfs-site");
-    config4.setProperties(new HashMap<String, String>() {{
-      put("a", "b");
-    }});
+    Config config4 = configFactory.createReadOnly("hdfs-site", "version4",
+        Collections.singletonMap("a", "b"), null);
 
     ConfigGroup configGroup2 =
-        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr", Collections.singletonMap("hdfs-site", config4),
+        configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr",
+            new HashMap<>(Collections.singletonMap("hdfs-site", config4)),
             Collections.<Long, Host>emptyMap());
 
-    configGroup2.persist();
     c1.addConfigGroup(configGroup2);
 
     scvResponse = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup2);
@@ -1443,12 +1401,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1459,11 +1413,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1507,12 +1460,8 @@ public class ClusterTest {
     // Given
     createDefaultCluster();
 
-    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV1.setTag("version1");
-    hdfsSiteConfigV1.persist();
-
-    c1.addConfig(hdfsSiteConfigV1);
-
+    Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
+        ImmutableMap.of("p1", "v1"), ImmutableMap.<String, Map<String,String>>of());
 
     ServiceConfigVersionResponse hdfsSiteConfigResponseV1 = c1.addDesiredConfig("admin", Collections.singleton(hdfsSiteConfigV1));
     List<ConfigurationResponse> configResponsesDefaultGroup =  Collections.singletonList(
@@ -1523,11 +1472,10 @@ public class ClusterTest {
 
     hdfsSiteConfigResponseV1.setConfigurations(configResponsesDefaultGroup);
 
-    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
-    hdfsSiteConfigV2.setTag("version2");
+    Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
+        ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
 
     ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
-    configGroup.persist();
 
     c1.addConfigGroup(configGroup);
     ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -2373,17 +2321,13 @@ public class ClusterTest {
     ClusterEntity clusterEntity = clusterDAO.findByName("c1");
     assertEquals(0, clusterEntity.getClusterConfigEntities().size());
 
-    final Config originalConfig = configFactory.createNew(cluster, "foo-site",
+    final Config originalConfig = configFactory.createNew(cluster, "foo-site", "version3",
         new HashMap<String, String>() {
           {
             put("one", "two");
           }
         }, new HashMap<String, Map<String, String>>());
 
-    originalConfig.setTag("version3");
-    originalConfig.persist();
-    cluster.addConfig(originalConfig);
-
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1", "t1", "",
         new HashMap<String, Config>() {
           {
@@ -2391,7 +2335,6 @@ public class ClusterTest {
           }
         }, Collections.<Long, Host> emptyMap());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     clusterEntity = clusterDAO.findByName("c1");
@@ -2403,8 +2346,7 @@ public class ClusterTest {
     Map<String, String> properties = config.getProperties();
     properties.put("three", "four");
     config.setProperties(properties);
-
-    config.persist(false);
+    config.save();
 
     clusterEntity = clusterDAO.findByName("c1");
     assertEquals(1, clusterEntity.getClusterConfigEntities().size());
@@ -2545,13 +2487,7 @@ public class ClusterTest {
 
     // foo-type for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c1.setTag("version-1");
-    c1.setStackId(stackId);
-    c1.setVersion(1L);
-
-    cluster.addConfig(c1);
-    c1.persist();
+    Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -2562,12 +2498,7 @@ public class ClusterTest {
     // save v2
     // foo-type for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = new ConfigImpl(cluster, "foo-type", properties, propertiesAttributes, injector);
-    c2.setTag("version-2");
-    c2.setStackId(newStackId);
-    c2.setVersion(2L);
-    cluster.addConfig(c2);
-    c2.persist();
+    Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");

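ClusterTest also exercises the third factory method: where it previously instantiated a bare new ConfigImpl("hdfs-site") with no cluster behind it, it now asks for a read-only instance, which, as the name suggests, carries only type, tag, and properties and presumably never touches the database. In sketch form:

    // A detached Config for the "config with empty cluster" case; the null
    // attributes argument mirrors the call in the test above.
    Config detached = configFactory.createReadOnly("hdfs-site", "version4",
        Collections.singletonMap("a", "b"), null);
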
+ 2 - 6
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java

@@ -405,19 +405,15 @@ public class ClustersTest {
     cluster.transitionClusterVersion(stackId, stackId.getStackVersion(),
         RepositoryVersionState.CURRENT);
 
-    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    final Config config1 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "1",
         new HashMap<String, String>() {{
           put("prop1", "val1");
         }}, new HashMap<String, Map<String,String>>());
-    config1.setTag("1");
-    config1.persist();
 
-    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1",
+    Config config2 = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", "2",
         new HashMap<String, String>() {{
           put("prop2", "val2");
         }}, new HashMap<String, Map<String,String>>());
-    config2.setTag("2");
-    config2.persist();
 
     // cluster desired config
     cluster.addDesiredConfig("_test", Collections.singleton(config1));

+ 2 - 7
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java

@@ -114,17 +114,12 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
-    Config config1 = configFactory.createNew(cluster, "test-type1", new HashMap<String, String>(), new HashMap<String,
+    Config config1 = configFactory.createNew(cluster, "test-type1", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    Config config2 = configFactory.createNew(cluster, "test-type2", new HashMap<String, String>(), new HashMap<String,
+    Config config2 = configFactory.createNew(cluster, "test-type2", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
 
-    config1.persist();
-    config2.persist();
-
-    cluster.addConfig(config1);
-    cluster.addConfig(config2);
     cluster.addDesiredConfig("test user", new HashSet<Config>(Arrays.asList(config1, config2)));
 
     String hostName = "c6401";

+ 2 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java

@@ -384,7 +384,7 @@ public class HostTest {
     clusters.mapHostToCluster("h1", "c1");
 
     ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-    Config config = configFactory.createNew(c1, "global",
+    Config config = configFactory.createNew(c1, "global", "v1",
         new HashMap<String,String>() {{ put("a", "b"); put("x", "y"); }}, new HashMap<String, Map<String,String>>());
 
     try {
@@ -396,16 +396,14 @@ public class HostTest {
     }
 
 
-    config.setTag("v1");
     host.addDesiredConfig(c1.getClusterId(), true, "_test", config);
 
     Map<String, DesiredConfig> map = host.getDesiredConfigs(c1.getClusterId());
     Assert.assertTrue("Expect desired config to contain global", map.containsKey("global"));
     Assert.assertEquals("Expect global user to be '_test'", "_test", map.get("global").getUser());
 
-    config = configFactory.createNew(c1, "global",
+    config = configFactory.createNew(c1, "global", "v2",
         new HashMap<String,String>() {{ put("c", "d"); }}, new HashMap<String, Map<String,String>>());
-    config.setTag("v2");
     host.addDesiredConfig(c1.getClusterId(), true, "_test1", config);
 
     map = host.getDesiredConfigs(c1.getClusterId());

+ 5 - 19
ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java

@@ -221,11 +221,8 @@ public class ServiceComponentHostTest {
 
     Cluster c = clusters.getCluster(clusterName);
     if (c.getConfig("time", String.valueOf(timestamp)) == null) {
-      Config config = configFactory.createNew (c, "time",
+      Config config = configFactory.createNew (c, "time", String.valueOf(timestamp),
           new HashMap<String, String>(), new HashMap<String, Map<String,String>>());
-      config.setTag(String.valueOf(timestamp));
-      c.addConfig(config);
-      config.persist();
     }
 
     switch (eventType) {
@@ -564,7 +561,6 @@ public class ServiceComponentHostTest {
     final ConfigGroup configGroup = configGroupFactory.createNew(cluster,
       "cg1", "t1", "", new HashMap<String, Config>(), new HashMap<Long, Host>());
 
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Map<String, Map<String,String>> actual =
@@ -815,17 +811,14 @@ public class ServiceComponentHostTest {
     final Host host = clusters.getHostsForCluster(clusterName).get(hostName);
     Assert.assertNotNull(host);
 
-    final Config c = configFactory.createNew(cluster, "hdfs-site",
+    final Config c = configFactory.createNew(cluster, "hdfs-site", "version3",
         new HashMap<String, String>() {{ put("dfs.journalnode.http-address", "http://goo"); }},
         new HashMap<String, Map<String,String>>());
-    c.setTag("version3");
-    c.persist();
-    cluster.addConfig(c);
+
     host.addDesiredConfig(cluster.getClusterId(), true, "user", c);
     ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1",
       "t1", "", new HashMap<String, Config>() {{ put("hdfs-site", c); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     // HDP-x/HDFS/hdfs-site updated host to changed property
@@ -876,16 +869,12 @@ public class ServiceComponentHostTest {
 
     sch1.updateActualConfigs(actual);
 
-    final Config c1 = configFactory.createNew(cluster, "core-site",
+    final Config c1 = configFactory.createNew(cluster, "core-site", "version2",
       new HashMap<String, String>() {{ put("fs.trash.interval", "400"); }},
       new HashMap<String, Map<String,String>>());
-    c1.setTag("version2");
-    c1.persist();
-    cluster.addConfig(c1);
     configGroup = configGroupFactory.createNew(cluster, "g2",
       "t2", "", new HashMap<String, Config>() {{ put("core-site", c1); }},
       new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
-    configGroup.persist();
     cluster.addConfigGroup(configGroup);
 
     Assert.assertTrue(sch1.convertToResponse(null).isStaleConfig());
@@ -1039,10 +1028,7 @@ public class ServiceComponentHostTest {
    * @param values the values for the config
    */
   private void makeConfig(Cluster cluster, String type, String tag, Map<String, String> values, Map<String, Map<String, String>> attributes) {
-    Config config = configFactory.createNew(cluster, type, values, attributes);
-    config.setTag(tag);
-    config.persist();
-    cluster.addConfig(config);
+    Config config = configFactory.createNew(cluster, type, tag, values, attributes);
     cluster.addDesiredConfig("user", Collections.singleton(config));
   }
 

+ 33 - 5
ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java

@@ -59,6 +59,7 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
@@ -66,13 +67,14 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.easymock.Capture;
+import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableList;
 
 /**
  * AmbariContext unit tests
@@ -110,6 +112,7 @@ public class AmbariContextTest {
   private static final ConfigGroup configGroup2 = createMock(ConfigGroup.class);
   private static final Host host1 = createNiceMock(Host.class);
   private static final Host host2 = createNiceMock(Host.class);
+  private static final ConfigFactory configFactory = createNiceMock(ConfigFactory.class);
 
   private static final Collection<String> blueprintServices = new HashSet<String>();
   private static final Map<String, Service> clusterServices = new HashMap<String, Service>();
@@ -164,6 +167,9 @@ public class AmbariContextTest {
     type1Props.put("prop3", "val3");
     group1Configuration = new Configuration(group1Properties, null, bpConfiguration);
 
+    Map<String, String> group1ResolvedProperties = new HashMap<String, String>(bpType1Props);
+    group1ResolvedProperties.putAll(type1Props);
+
     // config type -> service mapping
     Map<String, String> configTypeServiceMapping = new HashMap<String, String>();
     configTypeServiceMapping.put("type1", "service1");
@@ -172,6 +178,28 @@ public class AmbariContextTest {
     configGroups.put(1L, configGroup1);
     configGroups.put(2L, configGroup2);
 
+    // config factory mock
+    Config type1Group1 = createNiceMock(Config.class);
+    expect(type1Group1.getType()).andReturn("type1").anyTimes();
+    expect(type1Group1.getTag()).andReturn("group1").anyTimes();
+    expect(type1Group1.getProperties()).andReturn(group1ResolvedProperties).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("group1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(type1Group1).anyTimes();
+    replay(type1Group1);
+
+    Config type1Service1 = createNiceMock(Config.class);
+    expect(type1Service1.getType()).andReturn("type1").anyTimes();
+    expect(type1Service1.getTag()).andReturn("service1").anyTimes();
+    expect(type1Service1.getProperties()).andReturn(type1Props).anyTimes();
+    expect(configFactory.createReadOnly(EasyMock.eq("type1"), EasyMock.eq("service1"),
+        EasyMock.<Map<String, String>> anyObject(),
+        EasyMock.<Map<String, Map<String, String>>> anyObject())).andReturn(
+            type1Service1).anyTimes();
+    replay(type1Service1);
+
+    context.configFactory = configFactory;
+
     blueprintServices.add("service1");
     blueprintServices.add("service2");
 
@@ -222,17 +250,17 @@ public class AmbariContextTest {
   public void tearDown() throws Exception {
     verify(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
 
     reset(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   private void replayAll() {
     replay(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2);
+        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   @Test
@@ -330,6 +358,7 @@ public class AmbariContextTest {
     expect(clusterController.ensureResourceProvider(Resource.Type.ConfigGroup)).andReturn(configGroupResourceProvider).once();
     //todo: for now not using return value so just returning null
     expect(configGroupResourceProvider.createResources(capture(configGroupRequestCapture))).andReturn(null).once();
+
     // replay all mocks
     replayAll();
 
@@ -416,7 +445,6 @@ public class AmbariContextTest {
 
     expect(configGroup1.getHosts()).andReturn(Collections.singletonMap(2L, host2)).once();
     configGroup1.addHost(host1);
-    configGroup1.persistHostMapping();
 
     // replay all mocks
     replayAll();

+ 17 - 23
ambari-server/src/test/java/org/apache/ambari/server/update/HostUpdateHelperTest.java

@@ -49,6 +49,8 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.utils.CollectionPresentationUtils;
@@ -62,6 +64,7 @@ import com.google.gson.JsonPrimitive;
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
 
 import junit.framework.Assert;
 
@@ -212,16 +215,12 @@ public class HostUpdateHelperTest {
     Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
     Cluster mockCluster = easyMockSupport.createNiceMock(Cluster.class);
     ClusterEntity mockClusterEntity1 = easyMockSupport.createNiceMock(ClusterEntity.class);
-    ClusterEntity mockClusterEntity2 = easyMockSupport.createNiceMock(ClusterEntity.class);
     ClusterConfigEntity mockClusterConfigEntity1 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     ClusterConfigEntity mockClusterConfigEntity2 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity3 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
-    ClusterConfigEntity mockClusterConfigEntity4 = easyMockSupport.createNiceMock(ClusterConfigEntity.class);
     StackEntity mockStackEntity = easyMockSupport.createNiceMock(StackEntity.class);
     Map<String, Map<String, String>> clusterHostsToChange = new HashMap<>();
     Map<String, String> hosts = new HashMap<>();
     List<ClusterConfigEntity> clusterConfigEntities1 = new ArrayList<>();
-    List<ClusterConfigEntity> clusterConfigEntities2 = new ArrayList<>();
 
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
@@ -231,6 +230,8 @@ public class HostUpdateHelperTest {
         bind(EntityManager.class).toInstance(entityManager);
         bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
         bind(ClusterDAO.class).toInstance(mockClusterDAO);
+
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
 
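The install(new FactoryModuleBuilder()...) line above binds Guice assisted injection: Guice generates an implementation of ConfigFactory whose methods construct ConfigImpl instances. A self-contained sketch of the same binding with hypothetical Report/ReportImpl/ReportFactory types (illustrative names, not Ambari classes):

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;
    import com.google.inject.assistedinject.Assisted;
    import com.google.inject.assistedinject.AssistedInject;
    import com.google.inject.assistedinject.FactoryModuleBuilder;

    interface Report {
        String title();
    }

    class ReportImpl implements Report {
        private final String title;

        // @Assisted marks the arguments supplied by the factory caller.
        @AssistedInject
        ReportImpl(@Assisted String title) {
            this.title = title;
        }

        @Override
        public String title() {
            return title;
        }
    }

    interface ReportFactory {
        Report create(String title);
    }

    public class AssistedInjectSketch {
        public static void main(String[] args) {
            Injector injector = Guice.createInjector(new AbstractModule() {
                @Override
                protected void configure() {
                    // Guice writes the ReportFactory implementation for us.
                    install(new FactoryModuleBuilder()
                        .implement(Report.class, ReportImpl.class)
                        .build(ReportFactory.class));
                }
            });
            System.out.println(injector.getInstance(ReportFactory.class).create("hosts").title());
        }
    }
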
@@ -242,49 +243,42 @@ public class HostUpdateHelperTest {
     clusterConfigEntities1.add(mockClusterConfigEntity1);
     clusterConfigEntities1.add(mockClusterConfigEntity2);
 
-    clusterConfigEntities2.add(mockClusterConfigEntity3);
-    clusterConfigEntities2.add(mockClusterConfigEntity4);
-
     clusterHostsToChange.put("cl1", hosts);
 
-    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).once();
-    expect(mockClusterDAO.findById(1L)).andReturn(mockClusterEntity2).atLeastOnce();
+    expect(mockClusterDAO.findByName("cl1")).andReturn(mockClusterEntity1).atLeastOnce();
 
     expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
 
     expect(mockClusters.getCluster("cl1")).andReturn(mockCluster).once();
-    expect(mockCluster.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockCluster.getClusterId()).andReturn(1L).anyTimes();
 
     expect(mockClusterEntity1.getClusterConfigEntities()).andReturn(clusterConfigEntities1).atLeastOnce();
-    expect(mockClusterEntity2.getClusterConfigEntities()).andReturn(clusterConfigEntities2).atLeastOnce();
 
-    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity1.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getConfigId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity1.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity1.getData()).andReturn("{\"testProperty1\" : \"testValue_host1\", " +
             "\"testProperty2\" : \"testValue_host5\", \"testProperty3\" : \"testValue_host11\", " +
             "\"testProperty4\" : \"testValue_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity1.getTag()).andReturn("testTag1").atLeastOnce();
     expect(mockClusterConfigEntity1.getType()).andReturn("testType1").atLeastOnce();
     expect(mockClusterConfigEntity1.getVersion()).andReturn(1L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(1L)).andReturn(mockClusterConfigEntity1).atLeastOnce();
 
-    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).once();
+    expect(mockClusterConfigEntity2.getClusterId()).andReturn(1L).atLeastOnce();
+    expect(mockClusterConfigEntity2.getConfigId()).andReturn(2L).anyTimes();
+    expect(mockClusterConfigEntity2.getStack()).andReturn(mockStackEntity).atLeastOnce();
     expect(mockClusterConfigEntity2.getData()).andReturn("{\"testProperty5\" : \"test_host1_test_host5_test_host11_test_host55\"}").atLeastOnce();
     expect(mockClusterConfigEntity2.getTag()).andReturn("testTag2").atLeastOnce();
     expect(mockClusterConfigEntity2.getType()).andReturn("testType2").atLeastOnce();
     expect(mockClusterConfigEntity2.getVersion()).andReturn(2L).atLeastOnce();
-
-    expect(mockClusterConfigEntity3.getTag()).andReturn("testTag1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getType()).andReturn("testType1").atLeastOnce();
-    expect(mockClusterConfigEntity3.getVersion()).andReturn(1L).atLeastOnce();
-
-    expect(mockClusterConfigEntity4.getTag()).andReturn("testTag2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getType()).andReturn("testType2").atLeastOnce();
-    expect(mockClusterConfigEntity4.getVersion()).andReturn(2L).atLeastOnce();
+    expect(mockClusterDAO.findConfig(2L)).andReturn(mockClusterConfigEntity2).atLeastOnce();
 
     Capture<String> dataCapture = EasyMock.newCapture();
-    mockClusterConfigEntity3.setData(EasyMock.capture(dataCapture));
+    mockClusterConfigEntity1.setData(EasyMock.capture(dataCapture));
     expectLastCall();
 
-    mockClusterConfigEntity4.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
+    mockClusterConfigEntity2.setData("{\"testProperty5\":\"test_host5_test_host1_test_host55_test_host11\"}");
     expectLastCall();
 
     HostUpdateHelper hostUpdateHelper = new HostUpdateHelper(null, null, mockInjector);
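dataCapture above records the JSON string handed to setData so the test can assert on it after HostUpdateHelper has run. A stripped-down sketch of that EasyMock Capture pattern, using a hypothetical ConfigBean interface:

    import static org.easymock.EasyMock.*;

    import org.easymock.Capture;
    import org.easymock.EasyMock;

    interface ConfigBean {
        void setData(String data);
    }

    public class CaptureSketch {
        public static void main(String[] args) {
            ConfigBean bean = createStrictMock(ConfigBean.class);

            // Record whatever string the code under test passes to setData.
            Capture<String> dataCapture = EasyMock.newCapture();
            bean.setData(capture(dataCapture));
            expectLastCall();
            replay(bean);

            // Stand-in for the code under test writing the rewritten JSON.
            bean.setData("{\"testProperty5\":\"test_host5\"}");

            verify(bean);
            System.out.println("captured: " + dataCapture.getValue());
        }
    }
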

+ 4 - 0
ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java

@@ -65,6 +65,9 @@ import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
@@ -126,6 +129,7 @@ public class StageUtilsTest extends EasyMockSupport {
         bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
 
         install(new FactoryModuleBuilder().build(ExecutionCommandWrapperFactory.class));
+        install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class));
       }
     });
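
With the factory bound here as well, StageUtilsTest code can ask the injector for a ConfigFactory instead of constructing ConfigImpl directly. A hedged fragment (not standalone; it assumes the injector configured above, and the argument order of createReadOnly is inferred from the mock expectations in AmbariContextTest, not from the ConfigFactory source):

    // Fragment: assumes Ambari's Config/ConfigFactory on the classpath and
    // the Guice injector built above. Argument order (type, tag, properties,
    // attributes) is read off the stubbed expectations earlier in this diff.
    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
    Config readOnly = configFactory.createReadOnly(
        "testType1",                                  // config type
        "testTag1",                                   // tag
        new HashMap<String, String>(),                // properties
        new HashMap<String, Map<String, String>>());  // attributes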