Merge branch 'branch-feature-AMBARI-18456' into trunk

Jonathan Hurley 9 years ago
parent
commit
f64fa72202
100 changed files with 2558 additions and 4226 deletions (per-file counts below list additions, then deletions)
  1. 0 1
      ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java
  2. 1 1
      ambari-project/pom.xml
  3. 25 24
      ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
  4. 0 8
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
  5. 14 19
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  6. 4 1
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
  7. 13 13
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
  8. 16 22
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
  9. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
  10. 69 53
      ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java
  11. 69 68
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java
  12. 3 4
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
  13. 1 8
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
  14. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
  15. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
  16. 11 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
  17. 11 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
  18. 0 12
      ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
  19. 9 0
      ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
  20. 45 53
      ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
  21. 0 11
      ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
  22. 7 5
      ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java
  23. 0 13
      ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
  24. 0 13
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
  25. 0 6
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
  26. 246 473
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
  27. 215 427
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
  28. 286 477
      ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
  29. 252 376
      ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
  30. 36 56
      ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
  31. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
  32. 244 462
      ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
  33. 289 484
      ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
  34. 0 1
      ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
  35. 11 8
      ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java
  36. 0 1
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
  37. 0 9
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
  38. 3 5
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
  39. 8 4
      ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
  40. 59 73
      ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
  41. 13 3
      ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
  42. 63 73
      ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
  43. 37 50
      ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
  44. 3 0
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  45. 33 10
      ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
  46. 1 4
      ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java
  47. 55 67
      ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
  48. 0 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
  49. 43 142
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  50. 0 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
  51. 41 39
      ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
  52. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
  53. 5 2
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
  54. 0 6
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
  55. 16 36
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java
  56. 28 35
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
  57. 0 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
  58. 0 2
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
  59. 0 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
  60. 24 24
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
  61. 3 3
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java
  62. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java
  63. 24 23
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
  64. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java
  65. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java
  66. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java
  67. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java
  68. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java
  69. 0 5
      ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
  70. 0 3
      ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
  71. 17 17
      ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
  72. 6 6
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
  73. 8 25
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java
  74. 11 16
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java
  75. 8 20
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
  76. 3 3
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java
  77. 10 9
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java
  78. 4 4
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java
  79. 7 6
      ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java
  80. 0 5
      ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
  81. 0 2
      ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
  82. 0 8
      ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
  83. 3 7
      ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
  84. 1 11
      ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
  85. 4 8
      ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
  86. 7 40
      ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
  87. 2 13
      ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
  88. 0 8
      ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
  89. 3 4
      ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
  90. 1 2
      ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
  91. 15 26
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
  92. 15 4
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
  93. 24 42
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
  94. 9 113
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
  95. 3 9
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
  96. 0 17
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
  97. 0 4
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
  98. 0 4
      ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
  99. 18 32
      ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java
  100. 18 4
      ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java

+ 0 - 1
ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java

@@ -91,7 +91,6 @@ public class ClusterUtils {
             hostAttributes.put("os_family", "redhat");
             hostAttributes.put("os_release_version", "6.3");
             host1.setHostAttributes(hostAttributes);
-            host1.persist();
         }
 
         /**

+ 1 - 1
ambari-project/pom.xml

@@ -114,7 +114,7 @@
       <dependency>
         <groupId>org.apache.derby</groupId>
         <artifactId>derby</artifactId>
-        <version>10.9.1.0</version>
+        <version>10.12.1.1</version>
       </dependency>
       <dependency>
         <groupId>org.springframework.security</groupId>

+ 25 - 24
ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java

@@ -18,40 +18,39 @@
 
 package org.apache.ambari.server.api.services;
 
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.UriInfo;
+
 import org.apache.ambari.server.api.handlers.RequestHandler;
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.predicate.PredicateCompiler;
 import org.apache.ambari.server.api.predicate.QueryLexer;
 import org.apache.ambari.server.api.query.render.Renderer;
 import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.internal.SortRequestImpl;
 import org.apache.ambari.server.controller.internal.PageRequestImpl;
+import org.apache.ambari.server.controller.internal.SortRequestImpl;
 import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.spi.SortRequest;
 import org.apache.ambari.server.controller.spi.PageRequest;
 import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.SortRequest;
 import org.apache.ambari.server.controller.spi.SortRequestProperty;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 import org.apache.ambari.server.utils.RequestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.springframework.web.context.request.RequestContextHolder;
-import org.springframework.web.context.request.ServletRequestAttributes;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Collection;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 /**
  * Request implementation.
@@ -144,11 +143,13 @@ public abstract class BaseRequest implements Request {
       parseQueryPredicate();
       result = getRequestHandler().handleRequest(this);
     } catch (InvalidQueryException e) {
-      result =  new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
-          "Unable to compile query predicate: " + e.getMessage()));
+      String message = "Unable to compile query predicate: " + e.getMessage();
+      LOG.error(message, e);
+      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, message));
     } catch (IllegalArgumentException e) {
-      result =  new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
-          "Invalid Request: " + e.getMessage()));
+      String message = "Invalid Request: " + e.getMessage();
+      LOG.error(message, e);
+      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, message));
     }
 
     if (! result.getStatus().isErrorState()) {
@@ -322,7 +323,7 @@ public abstract class BaseRequest implements Request {
     if (queryString != null) {
       try {
         Collection<String> ignoredProperties = null;
-        switch (this.getRequestType()) {
+        switch (getRequestType()) {
           case PUT:
             ignoredProperties = m_resource.getResourceDefinition().getUpdateDirectives();
             break;

+ 0 - 8
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java

@@ -53,7 +53,6 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentFactory;
 import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.State;
@@ -548,13 +547,6 @@ public interface AmbariManagementController {
    */
   AmbariMetaInfo getAmbariMetaInfo();
 
-  /**
-   * Get the service factory for this management controller.
-   *
-   * @return the service factory
-   */
-  ServiceFactory getServiceFactory();
-
   /**
    * Get the service component factory for this management controller.
    *

+ 14 - 19
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -63,7 +63,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
 
 import javax.persistence.RollbackException;
 
@@ -3089,13 +3088,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         changedHosts, requestParameters, requestProperties,
         runSmokeTest, reconfigureClients);
 
-    Lock clusterWriteLock = cluster.getClusterGlobalLock().writeLock();
-    clusterWriteLock.lock();
-    try {
-      updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
-    } finally {
-      clusterWriteLock.unlock();
-    }
+    updateServiceStates(cluster, changedServices, changedComponents, changedHosts, ignoredHosts);
+
     return requestStages;
   }
 
@@ -4716,11 +4710,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return ambariMetaInfo;
   }
 
-  @Override
-  public ServiceFactory getServiceFactory() {
-    return serviceFactory;
-  }
-
   @Override
   public ServiceComponentFactory getServiceComponentFactory() {
     return serviceComponentFactory;
@@ -5167,13 +5156,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+    }
 
     ExtensionHelper.validateDeleteLink(getClusters(), stackInfo, extensionInfo);
     ambariMetaInfo.getStackManager().unlinkStackAndExtension(stackInfo, extensionInfo);
@@ -5203,13 +5194,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + request.getStackName() + ", stackVersion=" + request.getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(request.getExtensionName(), request.getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
+    }
 
     ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
@@ -5266,13 +5259,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException {
     StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
 
-    if (stackInfo == null)
+    if (stackInfo == null) {
       throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+    }
 
     ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
 
-    if (extensionInfo == null)
+    if (extensionInfo == null) {
       throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+    }
 
     ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
   }

+ 4 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -28,6 +28,8 @@ import java.net.URL;
 import java.util.EnumSet;
 import java.util.Enumeration;
 import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.logging.LogManager;
 
 import javax.crypto.BadPaddingException;
@@ -145,6 +147,7 @@ import org.springframework.web.context.request.RequestContextListener;
 import org.springframework.web.context.support.GenericWebApplicationContext;
 import org.springframework.web.filter.DelegatingFilterProxy;
 
+import com.google.common.base.Joiner;
 import com.google.common.util.concurrent.ServiceManager;
 import com.google.gson.Gson;
 import com.google.inject.Guice;
@@ -864,7 +867,7 @@ public class AmbariServer {
 
     BaseService.init(injector.getInstance(RequestAuditLogger.class));
 
-    RetryHelper.init(configs.getOperationsRetryAttempts());
+    RetryHelper.init(injector.getInstance(Clusters.class), configs.getOperationsRetryAttempts());
   }
 
   /**
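
RetryHelper.init now receives Clusters in addition to the retry count, so the retry machinery can invalidate cluster caches between attempts. The RetryHelper.java hunk is not part of this excerpt; the sketch below shows only the shape implied by the new call site, with field names as assumptions:

    // Hedged sketch of the init() shape implied by the call above;
    // field names are assumptions, not the actual Ambari source.
    public class RetryHelper {
      private static Clusters s_clusters;
      private static int s_maxRetryAttempts;

      public static void init(Clusters clusters, int operationsRetryAttempts) {
        s_clusters = clusters;                 // used to refresh stale cluster state on retry
        s_maxRetryAttempts = operationsRetryAttempts;
      }
    }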

+ 13 - 13
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java

@@ -17,9 +17,16 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
@@ -57,15 +64,9 @@ import org.apache.ambari.server.state.State;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 
 /**
  * Resource provider for component resources.
@@ -379,7 +380,6 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       }
 
       s.addServiceComponent(sc);
-      sc.persist();
     }
   }
 

+ 16 - 22
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java

@@ -17,9 +17,18 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.Inject;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
@@ -60,23 +69,14 @@ import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Inject;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
 
 /**
  * Resource provider for service resources.
@@ -344,18 +344,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
-    ServiceFactory serviceFactory = getManagementController().getServiceFactory();
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
-      State state = State.INIT;
-
       // Already checked that service does not exist
-      Service s = serviceFactory.createNew(cluster, request.getServiceName());
+      Service s = cluster.addService(request.getServiceName());
 
-      s.setDesiredState(state);
-      s.setDesiredStackVersion(cluster.getDesiredStackVersion());
-      s.persist();
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
     }
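
Service creation collapses from factory-create, set-state, persist into a single cluster.addService(name) call. Judging only from the removed call-site code, addService presumably absorbs those steps; a hedged sketch, not the actual ClusterImpl body:

    // Hedged sketch of what Cluster.addService(String) now encapsulates,
    // inferred from the call-site code deleted above.
    public Service addService(String serviceName) throws AmbariException {
      Service service = serviceFactory.createNew(this, serviceName); // created in persisted form
      service.setDesiredState(State.INIT);
      service.setDesiredStackVersion(getDesiredStackVersion());
      addService(service);                                           // register in the service map
      return service;
    }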

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java

@@ -360,7 +360,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
           // rollback
           // As we operate inside with cluster data, any cache which belongs to
           // cluster need to be flushed
-          cluster.invalidateData();
+          clusters.get().invalidate(cluster);
           throw e;
         }
       }

+ 69 - 53
ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertServiceStateListener.java

@@ -20,6 +20,7 @@ package org.apache.ambari.server.events.listeners.alerts;
 import java.text.MessageFormat;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.locks.Lock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.EagerSingleton;
@@ -34,7 +35,6 @@ import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.AlertDispatchDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertGroupEntity;
-import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.alert.AlertDefinition;
 import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.eventbus.AllowConcurrentEvents;
 import com.google.common.eventbus.Subscribe;
+import com.google.common.util.concurrent.Striped;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
@@ -95,7 +96,13 @@ public class AlertServiceStateListener {
    * Used to retrieve a cluster using clusterId from event.
    */
   @Inject
-  private Provider<Clusters> clusters;
+  private Provider<Clusters> m_clusters;
+
+  /**
+   * Used for ensuring that the concurrent nature of the event handler methods
+   * don't collide when attempting to perform operations on the same service.
+   */
+  private Striped<Lock> m_locksByService = Striped.lazyWeakLock(20);
 
   /**
    * Constructor.
@@ -125,38 +132,46 @@ public class AlertServiceStateListener {
     String stackVersion = event.getStackVersion();
     String serviceName = event.getServiceName();
 
-    // create the default alert group for the new service if absent; this MUST
-    // be done before adding definitions so that they are properly added to the
-    // default group
-    if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
-      try {
-        m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
-      } catch (AmbariException ambariException) {
-        LOG.error("Unable to create a default alert group for {}",
-          event.getServiceName(), ambariException);
-      }
-    }
+    Lock lock = m_locksByService.get(serviceName);
+    lock.lock();
 
-    // populate alert definitions for the new service from the database, but
-    // don't worry about sending down commands to the agents; the host
-    // components are not yet bound to the hosts so we'd have no way of knowing
-    // which hosts are invalidated; do that in another impl
     try {
-      Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
-          stackName, stackVersion, serviceName);
+      // create the default alert group for the new service if absent; this MUST
+      // be done before adding definitions so that they are properly added to the
+      // default group
+      if (null == m_alertDispatchDao.findDefaultServiceGroup(clusterId, serviceName)) {
+        try {
+          m_alertDispatchDao.createDefaultGroup(clusterId, serviceName);
+        } catch (AmbariException ambariException) {
+          LOG.error("Unable to create a default alert group for {}",
+            event.getServiceName(), ambariException);
+        }
+      }
 
-      for (AlertDefinition definition : alertDefinitions) {
-        AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
-            clusterId,
-            definition);
+      // populate alert definitions for the new service from the database, but
+      // don't worry about sending down commands to the agents; the host
+      // components are not yet bound to the hosts so we'd have no way of knowing
+      // which hosts are invalidated; do that in another impl
+      try {
+        Set<AlertDefinition> alertDefinitions = m_metaInfoProvider.get().getAlertDefinitions(
+            stackName, stackVersion, serviceName);
 
-        m_definitionDao.create(entity);
+        for (AlertDefinition definition : alertDefinitions) {
+          AlertDefinitionEntity entity = m_alertDefinitionFactory.coerce(
+              clusterId,
+              definition);
+
+          m_definitionDao.create(entity);
+        }
+      } catch (AmbariException ae) {
+        String message = MessageFormat.format(
+            "Unable to populate alert definitions from the database during installation of {0}",
+            serviceName);
+        LOG.error(message, ae);
       }
-    } catch (AmbariException ae) {
-      String message = MessageFormat.format(
-          "Unable to populate alert definitions from the database during installation of {0}",
-          serviceName);
-      LOG.error(message, ae);
+    }
+    finally {
+      lock.unlock();
     }
   }
 
@@ -170,43 +185,44 @@ public class AlertServiceStateListener {
   @AllowConcurrentEvents
   public void onAmbariEvent(ServiceRemovedEvent event) {
     LOG.debug("Received event {}", event);
-    Cluster cluster = null;
 
     try {
-      cluster = clusters.get().getClusterById(event.getClusterId());
+      m_clusters.get().getClusterById(event.getClusterId());
     } catch (AmbariException e) {
-      LOG.warn("Unable to retrieve cluster info for id: " + event.getClusterId());
+      LOG.warn("Unable to retrieve cluster with id {}", event.getClusterId());
+      return;
     }
 
-    if (cluster != null) {
-      // TODO: Explicit locking used to prevent deadlock situation caused during cluster delete
-      cluster.getClusterGlobalLock().writeLock().lock();
-      try {
-        List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
+    String serviceName = event.getServiceName();
+    Lock lock = m_locksByService.get(serviceName);
+    lock.lock();
+
+    try {
+      List<AlertDefinitionEntity> definitions = m_definitionDao.findByService(event.getClusterId(),
           event.getServiceName());
 
-        for (AlertDefinitionEntity definition : definitions) {
-          try {
-            m_definitionDao.remove(definition);
-          } catch (Exception exception) {
-            LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(), exception);
-          }
+      for (AlertDefinitionEntity definition : definitions) {
+        try {
+          m_definitionDao.remove(definition);
+        } catch (Exception exception) {
+          LOG.error("Unable to remove alert definition {}", definition.getDefinitionName(),
+              exception);
         }
+      }
 
-        // remove the default group for the service
-        AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
+      // remove the default group for the service
+      AlertGroupEntity group = m_alertDispatchDao.findGroupByName(event.getClusterId(),
           event.getServiceName());
 
-        if (null != group && group.isDefault()) {
-          try {
-            m_alertDispatchDao.remove(group);
-          } catch (Exception exception) {
-            LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
-          }
+      if (null != group && group.isDefault()) {
+        try {
+          m_alertDispatchDao.remove(group);
+        } catch (Exception exception) {
+          LOG.error("Unable to remove default alert group {}", group.getGroupName(), exception);
         }
-      } finally {
-        cluster.getClusterGlobalLock().writeLock().unlock();
       }
+    } finally {
+      lock.unlock();
     }
   }
 }
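
The listener replaces the cluster-global write lock with Guava's Striped locks keyed by service name, so concurrent events for different services no longer serialize against each other. A self-contained demonstration of the striping behavior (the class and service names are illustrative):

    import java.util.concurrent.locks.Lock;
    import com.google.common.util.concurrent.Striped;

    public class StripedLockDemo {
      // 20 lazily-created, weakly-referenced stripes: the same key always maps
      // to the same lock; distinct keys usually map to distinct locks.
      private static final Striped<Lock> LOCKS = Striped.lazyWeakLock(20);

      public static void withServiceLock(String serviceName, Runnable work) {
        Lock lock = LOCKS.get(serviceName); // same name -> same lock instance
        lock.lock();
        try {
          work.run();
        } finally {
          lock.unlock();
        }
      }

      public static void main(String[] args) {
        withServiceLock("HDFS", () -> System.out.println("per-service critical section"));
      }
    }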

+ 69 - 68
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ConfigGroupHostMappingDAO.java

@@ -17,10 +17,16 @@
  */
 package org.apache.ambari.server.orm.dao;
 
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.RequiresSession;
@@ -40,16 +46,10 @@ import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.Predicate;
 
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.HashMap;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
 
 @Singleton
 public class ConfigGroupHostMappingDAO {
@@ -65,16 +65,16 @@ public class ConfigGroupHostMappingDAO {
   private HostFactory hostFactory;
   @Inject
   Clusters clusters;
-  
+
   private final ReadWriteLock gl = new ReentrantReadWriteLock();
-  
+
   private Map<Long, Set<ConfigGroupHostMapping>> configGroupHostMappingByHost;
-  
+
   private volatile boolean cacheLoaded;
 
-  
+
   private void populateCache() {
-    
+
     if (!cacheLoaded) {
       gl.writeLock().lock();
       try {
@@ -106,10 +106,10 @@ public class ConfigGroupHostMappingDAO {
       } finally {
         gl.writeLock().unlock();
       }
-      
+
 
     }
-    
+
   }
 
   /**
@@ -121,50 +121,51 @@ public class ConfigGroupHostMappingDAO {
   @RequiresSession
   public ConfigGroupHostMappingEntity findByPK(final ConfigGroupHostMappingEntityPK
         configGroupHostMappingEntityPK) {
-    
+
     return entityManagerProvider.get()
       .find(ConfigGroupHostMappingEntity.class, configGroupHostMappingEntityPK);
   }
 
   @RequiresSession
   public Set<ConfigGroupHostMapping> findByHostId(Long hostId) {
-    
+
     populateCache();
-    
-    if (!configGroupHostMappingByHost.containsKey(hostId))
+
+    if (!configGroupHostMappingByHost.containsKey(hostId)) {
       return null;
-    
+    }
+
     Set<ConfigGroupHostMapping> set = new HashSet<ConfigGroupHostMapping>(configGroupHostMappingByHost.get(hostId));
-    
+
     return set;
-    
+
   }
 
   @RequiresSession
   public Set<ConfigGroupHostMapping> findByGroup(final Long groupId) {
-    
+
     populateCache();
-    
+
     Set<ConfigGroupHostMapping> result = new HashSet<ConfigGroupHostMapping>();
-    
+
     for (Set<ConfigGroupHostMapping> item : configGroupHostMappingByHost.values()) {
-      
+
       Set<ConfigGroupHostMapping> setByHost = new HashSet<ConfigGroupHostMapping>(item);
-      
+
       CollectionUtils.filter(setByHost, new Predicate() {
-        
+
         @Override
         public boolean evaluate(Object arg0) {
           return ((ConfigGroupHostMapping) arg0).getConfigGroupId().equals(groupId);
         }
       });
-      
+
       result.addAll(setByHost);
-      
+
     }
-    
+
     return result;
-    
+
   }
 
   @RequiresSession
@@ -178,33 +179,33 @@ public class ConfigGroupHostMappingDAO {
     populateCache();
 
     entityManagerProvider.get().persist(configGroupHostMappingEntity);
-    
+
     //create in cache
     Set<ConfigGroupHostMapping> set = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
     if (set == null){
       set = new HashSet<ConfigGroupHostMapping>();
       configGroupHostMappingByHost.put(configGroupHostMappingEntity.getHostId(), set);
     }
-    
+
     set.add(buildConfigGroupHostMapping(configGroupHostMappingEntity));
   }
 
   @Transactional
   public ConfigGroupHostMappingEntity merge(ConfigGroupHostMappingEntity configGroupHostMappingEntity) {
-    
+
     populateCache();
-    
+
     Set<ConfigGroupHostMapping> set = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
     if (set == null){
       set = new HashSet<ConfigGroupHostMapping>();
       configGroupHostMappingByHost.put(configGroupHostMappingEntity.getHostId(), set);
     }
-    
+
     //Update object in set
     set.remove(buildConfigGroupHostMapping(configGroupHostMappingEntity));
     set.add(buildConfigGroupHostMapping(configGroupHostMappingEntity));
-    
-    
+
+
     return entityManagerProvider.get().merge(configGroupHostMappingEntity);
   }
 
@@ -213,23 +214,23 @@ public class ConfigGroupHostMappingDAO {
                          configGroupHostMappingEntity) {
     cacheLoaded = false;
     populateCache();
-    
+
     entityManagerProvider.get().refresh(configGroupHostMappingEntity);
   }
 
   @Transactional
   public void remove(final ConfigGroupHostMappingEntity
                          configGroupHostMappingEntity) {
-    
+
     populateCache();
-    
+
     entityManagerProvider.get().remove(merge(configGroupHostMappingEntity));
-    
+
     Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(configGroupHostMappingEntity.getHostId());
-    
+
     if (setByHost != null) {
       CollectionUtils.filter(setByHost, new Predicate() {
-        
+
         @Override
         public boolean evaluate(Object arg0) {
           return !((ConfigGroupHostMapping) arg0).getConfigGroupId().
@@ -243,14 +244,14 @@ public class ConfigGroupHostMappingDAO {
   public void removeByPK(final ConfigGroupHostMappingEntityPK
                          configGroupHostMappingEntityPK) {
     populateCache();
-    
+
     entityManagerProvider.get().remove(findByPK(configGroupHostMappingEntityPK));
-    
+
     Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(configGroupHostMappingEntityPK.getHostId());
-    
+
     if (setByHost != null) {
       CollectionUtils.filter(setByHost, new Predicate() {
-        
+
         @Override
         public boolean evaluate(Object arg0) {
           return !((ConfigGroupHostMapping) arg0).getConfigGroupId().
@@ -258,7 +259,7 @@ public class ConfigGroupHostMappingDAO {
         }
       });
     }
-    
+
   }
 
   @Transactional
@@ -273,18 +274,18 @@ public class ConfigGroupHostMappingDAO {
     // Flush to current transaction required in order to avoid Eclipse link
     // from re-ordering delete
     entityManagerProvider.get().flush();
-    
+
     for (Set<ConfigGroupHostMapping> setByHost : configGroupHostMappingByHost.values()) {
-      
+
       CollectionUtils.filter(setByHost, new Predicate() {
-        
+
         @Override
         public boolean evaluate(Object arg0) {
           return !((ConfigGroupHostMapping) arg0).getConfigGroupId().equals(groupId);
         }
       });
     }
-    
+
   }
 
   @Transactional
@@ -294,22 +295,22 @@ public class ConfigGroupHostMappingDAO {
         "confighosts.hostId = ?1", String.class);
 
     daoUtils.executeUpdate(query, hostId);
-    
-    
+
+
     Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(hostId);
-    
+
     setByHost.clear();
   }
-  
+
   private ConfigGroupHostMapping buildConfigGroupHostMapping(
       ConfigGroupHostMappingEntity configGroupHostMappingEntity) {
-    
+
     ConfigGroupHostMappingImpl configGroupHostMapping = new ConfigGroupHostMappingImpl();
     configGroupHostMapping.setConfigGroup(buildConfigGroup(configGroupHostMappingEntity.getConfigGroupEntity()));
     configGroupHostMapping.setConfigGroupId(configGroupHostMappingEntity.getConfigGroupId());
     configGroupHostMapping.setHost(buildHost(configGroupHostMappingEntity.getHostEntity()));
     configGroupHostMapping.setHostId(configGroupHostMappingEntity.getHostId());
-    
+
     return configGroupHostMapping;
   }
 
@@ -321,12 +322,12 @@ public class ConfigGroupHostMappingDAO {
       //almost impossible
     }
     ConfigGroup configGroup = configGroupFactory.createExisting(cluster, configGroupEntity);
-    
+
     return configGroup;
   }
 
   private Host buildHost(HostEntity hostEntity) {
-    Host host = hostFactory.create(hostEntity, false);
+    Host host = hostFactory.create(hostEntity);
     return host;
   }
 }

+ 3 - 4
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java

@@ -95,17 +95,16 @@ public class HostComponentDesiredStateDAO {
 
   @Transactional
   public void remove(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
-    HostEntity hostEntity = hostDAO.findById(hostComponentDesiredStateEntity.getHostId());
+    HostEntity hostEntity = hostComponentDesiredStateEntity.getHostEntity();
 
     if (hostEntity == null) {
       throw new IllegalStateException(String.format("Missing hostEntity for host id %1d",
               hostComponentDesiredStateEntity.getHostId()));
     }
 
-    entityManagerProvider.get().remove(merge(hostComponentDesiredStateEntity));
-
-    // Make sure that the state entity is removed from its host entity
     hostEntity.removeHostComponentDesiredStateEntity(hostComponentDesiredStateEntity);
+
+    entityManagerProvider.get().remove(hostComponentDesiredStateEntity);
     hostDAO.merge(hostEntity);
   }
 

+ 1 - 8
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java

@@ -27,7 +27,6 @@ import javax.persistence.TypedQuery;
 
 import org.apache.ambari.server.orm.RequiresSession;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.UpgradeState;
 
 import com.google.inject.Inject;
@@ -177,13 +176,7 @@ public class HostComponentStateDAO {
 
   @Transactional
   public void remove(HostComponentStateEntity hostComponentStateEntity) {
-    HostEntity hostEntity = hostDAO.findByName(hostComponentStateEntity.getHostName());
-
-    entityManagerProvider.get().remove(merge(hostComponentStateEntity));
-
-    // Make sure that the state entity is removed from its host entity
-    hostEntity.removeHostComponentStateEntity(hostComponentStateEntity);
-    hostDAO.merge(hostEntity);
+    entityManagerProvider.get().remove(hostComponentStateEntity);
   }
 
   /**

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java

@@ -112,7 +112,7 @@ public class HostDAO {
 
   @Transactional
   public void remove(HostEntity hostEntity) {
-    entityManagerProvider.get().remove(merge(hostEntity));
+    entityManagerProvider.get().remove(hostEntity);
   }
 
   public List<String> getHostNamesByHostIds(List<Long> hostIds) {

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java

@@ -49,7 +49,7 @@ public class ClusterServiceEntity {
   @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
   private ClusterEntity clusterEntity;
 
-  @OneToOne(mappedBy = "clusterServiceEntity")
+  @OneToOne(mappedBy = "clusterServiceEntity", cascade = { CascadeType.PERSIST, CascadeType.MERGE })
   private ServiceDesiredStateEntity serviceDesiredStateEntity;
 
   @OneToMany(mappedBy = "clusterServiceEntity")
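
Adding CascadeType.PERSIST and CascadeType.MERGE to the @OneToOne means persisting or merging a ClusterServiceEntity now propagates to its ServiceDesiredStateEntity automatically, which is what lets the service-creation paths elsewhere in this commit drop explicit persist() calls. A minimal illustration (the setter names are assumptions):

    // With cascade = {PERSIST, MERGE}, one persist() call saves both rows.
    ClusterServiceEntity service = new ClusterServiceEntity();
    ServiceDesiredStateEntity desiredState = new ServiceDesiredStateEntity();
    service.setServiceDesiredStateEntity(desiredState);  // setter names assumed
    desiredState.setClusterServiceEntity(service);
    entityManager.persist(service);                      // cascades to desiredState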

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java

@@ -37,6 +37,8 @@ import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.State;
 
+import com.google.common.base.Objects;
+
 @javax.persistence.IdClass(HostComponentDesiredStateEntityPK.class)
 @javax.persistence.Table(name = "hostcomponentdesiredstate")
 @Entity
@@ -255,4 +257,13 @@ public class HostComponentDesiredStateEntity {
   public void setRestartRequired(boolean restartRequired) {
     this.restartRequired = (restartRequired == false ? 0 : 1);
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this).add("serviceName", serviceName).add("componentName",
+        componentName).add("hostId", hostId).add("desiredState", desiredState).toString();
+  }
 }

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java

@@ -38,6 +38,8 @@ import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.UpgradeState;
 
+import com.google.common.base.Objects;
+
 @Entity
 @Table(name = "hostcomponentstate")
 @TableGenerator(
@@ -283,4 +285,13 @@ public class HostComponentStateEntity {
     this.hostEntity = hostEntity;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this).add("serviceName", serviceName).add("componentName",
+        componentName).add("hostId", hostId).add("state", currentState).toString();
+  }
+
 }
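
Both state entities gain toString() built on Guava's Objects.toStringHelper (the pre-Guava-18 API matching the version this codebase compiled against; newer Guava moves it to MoreObjects.toStringHelper). A tiny demo of the output format:

    import com.google.common.base.Objects;

    class Demo {
      private final String serviceName = "HDFS";
      private final String componentName = "NAMENODE";

      @Override
      public String toString() {
        // Produces: Demo{serviceName=HDFS, componentName=NAMENODE}
        return Objects.toStringHelper(this)
            .add("serviceName", serviceName)
            .add("componentName", componentName)
            .toString();
      }
    }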

+ 0 - 12
ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java

@@ -22,7 +22,6 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ClusterResponse;
@@ -521,12 +520,6 @@ public interface Cluster {
    */
   Service addService(String serviceName) throws AmbariException;
 
-  /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
-
   /**
    * Fetch desired configs for list of hosts in cluster
    * @param hostIds
@@ -669,11 +662,6 @@ public interface Cluster {
    */
   void removeConfigurations(StackId stackId);
 
-  /**
-   * Clear cluster caches and re-read data from database
-   */
-  void invalidateData();
-
   /**
    * Returns whether this cluster was provisioned by a Blueprint or not.
    * @return true if the cluster was deployed with a Blueprint otherwise false.

+ 9 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java

@@ -278,4 +278,13 @@ public interface Clusters {
    */
   int getClusterSize(String clusterName);
 
+  /**
+   * Invalidates the specified cluster by retrieving it from the database and
+   * refreshing all of the internal stateful collections.
+   *
+   * @param cluster
+   *          the cluster to invalidate and refresh (not {@code null}).
+   */
+  void invalidate(Cluster cluster);
+
 }
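
Cache invalidation moves from the Cluster itself (the removed Cluster.invalidateData()) to the Clusters manager. The ClustersImpl side is not part of this excerpt; a hedged sketch of an invalidate backed by a name-keyed cache, with field and factory names as assumptions:

    // Hedged sketch, NOT the actual ClustersImpl code: rebuild the in-memory
    // Cluster from its persisted entity and swap it into the cache so later
    // lookups see fresh state.
    private final ConcurrentMap<String, Cluster> clustersByName = new ConcurrentHashMap<>();

    @Override
    public void invalidate(Cluster cluster) {
      ClusterEntity entity = clusterDAO.findById(cluster.getClusterId());
      Cluster fresh = clusterFactory.create(entity);     // names assumed
      clustersByName.put(fresh.getClusterName(), fresh);
    }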

+ 45 - 53
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java

@@ -27,9 +27,6 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.ambari.annotations.TransactionalLock;
-import org.apache.ambari.annotations.TransactionalLock.LockArea;
-import org.apache.ambari.annotations.TransactionalLock.LockType;
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
@@ -356,69 +353,64 @@ public class ConfigImpl implements Config {
   @Override
   @Transactional
   public void persist(boolean newConfig) {
-    cluster.getClusterGlobalLock().writeLock().lock(); //null cluster is not expected, NPE anyway later in code
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-        if (newConfig) {
-          ClusterConfigEntity entity = new ClusterConfigEntity();
-          entity.setClusterEntity(clusterEntity);
-          entity.setClusterId(cluster.getClusterId());
-          entity.setType(getType());
-          entity.setVersion(getVersion());
-          entity.setTag(getTag());
-          entity.setTimestamp(new Date().getTime());
-          entity.setStack(clusterEntity.getDesiredStack());
-          entity.setData(gson.toJson(getProperties()));
+      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+
+      if (newConfig) {
+        ClusterConfigEntity entity = new ClusterConfigEntity();
+        entity.setClusterEntity(clusterEntity);
+        entity.setClusterId(cluster.getClusterId());
+        entity.setType(getType());
+        entity.setVersion(getVersion());
+        entity.setTag(getTag());
+        entity.setTimestamp(new Date().getTime());
+        entity.setStack(clusterEntity.getDesiredStack());
+        entity.setData(gson.toJson(getProperties()));
+
+        if (null != getPropertiesAttributes()) {
+          entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+        }
 
-          if (null != getPropertiesAttributes()) {
-            entity.setAttributes(gson.toJson(getPropertiesAttributes()));
+        clusterDAO.createConfig(entity);
+        clusterEntity.getClusterConfigEntities().add(entity);
+
+        // save the entity, forcing a flush to ensure the refresh picks up the
+        // newest data
+        clusterDAO.merge(clusterEntity, true);
+      } else {
+        // only supporting changes to the properties
+        ClusterConfigEntity entity = null;
+
+        // find the existing configuration to update
+        for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
+          if (getTag().equals(cfe.getTag()) && getType().equals(cfe.getType())
+              && getVersion().equals(cfe.getVersion())) {
+            entity = cfe;
+            break;
           }
+        }
+
+        // if the configuration was found, then update it
+        if (null != entity) {
+          LOG.debug(
+              "Updating {} version {} with new configurations; a new version will not be created",
+              getType(), getVersion());
 
-          clusterDAO.createConfig(entity);
-          clusterEntity.getClusterConfigEntities().add(entity);
+          entity.setData(gson.toJson(getProperties()));
 
           // save the entity, forcing a flush to ensure the refresh picks up the
           // newest data
           clusterDAO.merge(clusterEntity, true);
-          cluster.refresh();
-        } else {
-          // only supporting changes to the properties
-          ClusterConfigEntity entity = null;
-
-          // find the existing configuration to update
-          for (ClusterConfigEntity cfe : clusterEntity.getClusterConfigEntities()) {
-            if (getTag().equals(cfe.getTag()) &&
-                getType().equals(cfe.getType()) &&
-                getVersion().equals(cfe.getVersion())) {
-              entity = cfe;
-              break;
-            }
-          }
-
-          // if the configuration was found, then update it
-          if (null != entity) {
-            LOG.debug(
-                    "Updating {} version {} with new configurations; a new version will not be created",
-                    getType(), getVersion());
-
-            entity.setData(gson.toJson(getProperties()));
-
-            // save the entity, forcing a flush to ensure the refresh picks up the
-            // newest data
-            clusterDAO.merge(clusterEntity, true);
-            cluster.refresh();
-          }
         }
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
 
+    // re-load the entity associations for the cluster
+    cluster.refresh();
+
     // broadcast the change event for the configuration
     ClusterConfigChangedEvent event = new ClusterConfigChangedEvent(cluster.getClusterName(),
         getType(), getTag(), getVersion());

+ 0 - 11
ambari-server/src/main/java/org/apache/ambari/server/state/Host.java

@@ -42,11 +42,6 @@ public interface Host extends Comparable {
    */
   Long getHostId();
 
-  /**
-   * @param hostName the hostName to set
-   */
-  void setHostName(String hostName);
-
   /**
    * @return the currentPingPort
    */
@@ -338,12 +333,6 @@ public interface Host extends Comparable {
 
   HostResponse convertToResponse();
 
-  boolean isPersisted();
-
-  void persist();
-
-  void refresh();
-
   void importHostInfo(HostInfo hostInfo);
 
   /**

+ 7 - 5
ambari-server/src/main/java/org/apache/ambari/server/state/HostConfig.java

@@ -17,12 +17,13 @@
  */
 package org.apache.ambari.server.state;
 
-import com.google.common.base.Objects;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 
-import java.util.HashMap;
-import java.util.Map;
+import com.google.common.base.Objects;
 
 /**
  * Data structure that hangs off of the Host and points to what tags are
@@ -30,7 +31,7 @@ import java.util.Map;
  */
 
 public class HostConfig {
-  private final Map<Long, String> configGroupOverrides = new HashMap<Long, String>();
+  private final Map<Long, String> configGroupOverrides = new ConcurrentHashMap<Long, String>();
   private String defaultVersionTag;
 
   public HostConfig() {
@@ -67,8 +68,9 @@ public class HostConfig {
       sb.append(", overrides = [ ");
       int i = 0;
       for (Map.Entry<Long, String> entry : configGroupOverrides.entrySet()) {
-        if (i++ != 0)
+        if (i++ != 0) {
           sb.append(", ");
+        }
         sb.append(entry.getKey().toString() + " : " + entry.getValue());
       }
       sb.append("]");
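
Switching configGroupOverrides from HashMap to ConcurrentHashMap makes the iteration in toString() safe against concurrent writers: its iterators are weakly consistent rather than fail-fast. One caveat: ConcurrentHashMap rejects null keys and values. A minimal illustration:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class OverridesDemo {
      public static void main(String[] args) {
        Map<Long, String> overrides = new ConcurrentHashMap<Long, String>();
        overrides.put(1L, "version1");

        // Weakly consistent iteration: no ConcurrentModificationException even
        // if another thread puts entries while this loop runs.
        for (Map.Entry<Long, String> entry : overrides.entrySet()) {
          System.out.println(entry.getKey() + " : " + entry.getValue());
        }
      }
    }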

+ 0 - 13
ambari-server/src/main/java/org/apache/ambari/server/state/Service.java

@@ -19,7 +19,6 @@
 package org.apache.ambari.server.state;
 
 import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceResponse;
@@ -73,12 +72,6 @@ public interface Service {
 
   void debugDump(StringBuilder sb);
 
-  boolean isPersisted();
-
-  void persist();
-
-  void refresh();
-
   ServiceComponent addServiceComponent(String serviceComponentName)
       throws AmbariException;
 
@@ -98,12 +91,6 @@ public interface Service {
 
   void delete() throws AmbariException;
 
-  /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
-
   /**
    * Sets the maintenance state for the service
    * @param state the state

+ 0 - 13
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java

@@ -19,7 +19,6 @@
 package org.apache.ambari.server.state;
 
 import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
@@ -73,12 +72,6 @@ public interface ServiceComponent {
 
   ServiceComponentResponse convertToResponse();
 
-  void refresh();
-
-  boolean isPersisted();
-
-  void persist();
-
   void debugDump(StringBuilder sb);
 
   boolean isClientComponent();
@@ -98,10 +91,4 @@ public interface ServiceComponent {
       String hostName) throws AmbariException;
 
   void delete() throws AmbariException;
-
-  /**
-   * Get lock to control access to cluster structure
-   * @return cluster-global lock
-   */
-  ReadWriteLock getClusterGlobalLock();
 }

+ 0 - 6
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java

@@ -188,12 +188,6 @@ public interface ServiceComponentHost {
    */
   ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs);
 
-  boolean isPersisted();
-
-  void persist();
-
-  void refresh();
-
   void debugDump(StringBuilder sb);
 
   boolean canBeRemoved();
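
Across Host, Service, ServiceComponent, and ServiceComponentHost, the isPersisted()/persist()/refresh() trio disappears from the interfaces: implementations now persist their entities at construction time and re-read them on demand, so callers no longer manage an unsaved window. A self-contained sketch of that lifecycle, using hypothetical Dao/Entity stand-ins rather than the real JPA types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class Entity {
  long id;
  String state = "INIT";
}

interface Dao {
  long create(Entity entity);   // assigns and returns the generated id
  Entity findById(long id);
}

class InMemoryDao implements Dao {
  private final Map<Long, Entity> store = new ConcurrentHashMap<Long, Entity>();
  private final AtomicLong ids = new AtomicLong();

  @Override
  public long create(Entity entity) {
    entity.id = ids.incrementAndGet();
    store.put(entity.id, entity);
    return entity.id;
  }

  @Override
  public Entity findById(long id) {
    return store.get(id);
  }
}

class Component {
  private final Dao dao;
  private final long entityId;   // no cached entity, no "persisted" flag

  Component(Dao dao) {
    this.dao = dao;
    entityId = dao.create(new Entity());   // persisted before the constructor returns
  }

  String getState() {
    return dao.findById(entityId).state;   // always re-read through the DAO
  }
}

public class LifecycleSketch {
  public static void main(String[] args) {
    Component component = new Component(new InMemoryDao());
    System.out.println(component.getState());   // prints INIT
  }
}
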

+ 246 - 473
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java

@@ -18,12 +18,14 @@
 
 package org.apache.ambari.server.state;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ObjectNotFoundException;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
@@ -46,66 +48,71 @@ import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 
 public class ServiceComponentImpl implements ServiceComponent {
 
   private final static Logger LOG =
       LoggerFactory.getLogger(ServiceComponentImpl.class);
   private final Service service;
-  private final ReadWriteLock clusterGlobalLock;
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private final String componentName;
   private final String displayName;
   private final boolean isClientComponent;
   private final boolean isMasterComponent;
   private final boolean isVersionAdvertised;
-  volatile boolean persisted = false;
-  @Inject
-  private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  @Inject
-  private ClusterServiceDAO clusterServiceDAO;
-  @Inject
-  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  @Inject
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  private AmbariEventPublisher eventPublisher;
-
-  ServiceComponentDesiredStateEntity desiredStateEntity;
-  private Map<String, ServiceComponentHost> hostComponents;
+
+  private final ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
+
+  private final ClusterServiceDAO clusterServiceDAO;
+
+  private final ServiceComponentHostFactory serviceComponentHostFactory;
+
+  private final AmbariEventPublisher eventPublisher;
+
+  private final ConcurrentMap<String, ServiceComponentHost> hostComponents = new ConcurrentHashMap<String, ServiceComponentHost>();
+
+  /**
+   * The ID of the persisted {@link ServiceComponentDesiredStateEntity}.
+   */
+  private final long desiredStateEntityId;
 
   /**
    * Data access object used for lookup up stacks.
    */
-  @Inject
-  private StackDAO stackDAO;
+  private final StackDAO stackDAO;
 
   @AssistedInject
-  public ServiceComponentImpl(@Assisted Service service,
-                              @Assisted String componentName, Injector injector) throws AmbariException {
-    injector.injectMembers(this);
-    clusterGlobalLock = service.getClusterGlobalLock();
+  public ServiceComponentImpl(@Assisted Service service, @Assisted String componentName,
+      AmbariMetaInfo ambariMetaInfo,
+      ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+      ClusterServiceDAO clusterServiceDAO, ServiceComponentHostFactory serviceComponentHostFactory,
+      StackDAO stackDAO, AmbariEventPublisher eventPublisher)
+      throws AmbariException {
+
     this.service = service;
+    this.componentName = componentName;
+    this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+    this.clusterServiceDAO = clusterServiceDAO;
+    this.serviceComponentHostFactory = serviceComponentHostFactory;
+    this.stackDAO = stackDAO;
+    this.eventPublisher = eventPublisher;
+
+    StackId stackId = service.getDesiredStackVersion();
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
 
-    desiredStateEntity = new ServiceComponentDesiredStateEntity();
+    ServiceComponentDesiredStateEntity desiredStateEntity = new ServiceComponentDesiredStateEntity();
     desiredStateEntity.setComponentName(componentName);
     desiredStateEntity.setDesiredState(State.INIT);
     desiredStateEntity.setDesiredVersion(State.UNKNOWN.toString());
     desiredStateEntity.setServiceName(service.getName());
     desiredStateEntity.setClusterId(service.getClusterId());
     desiredStateEntity.setRecoveryEnabled(false);
-    setDesiredStackVersion(service.getDesiredStackVersion());
-
-    hostComponents = new HashMap<String, ServiceComponentHost>();
+    desiredStateEntity.setDesiredStack(stackEntity);
 
-    StackId stackId = service.getDesiredStackVersion();
     try {
       ComponentInfo compInfo = ambariMetaInfo.getComponent(stackId.getStackName(),
           stackId.getStackVersion(), service.getName(), componentName);
@@ -121,18 +128,29 @@ public class ServiceComponentImpl implements ServiceComponent {
           + ", componentName=" + componentName
           + ", stackInfo=" + stackId.getStackId());
     }
-    this.componentName = componentName;
+
+    persistEntities(desiredStateEntity);
+    desiredStateEntityId = desiredStateEntity.getId();
   }
 
   @AssistedInject
   public ServiceComponentImpl(@Assisted Service service,
-                              @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
-                              Injector injector) throws AmbariException {
-    injector.injectMembers(this);
-    clusterGlobalLock = service.getClusterGlobalLock();
+      @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
+      AmbariMetaInfo ambariMetaInfo,
+      ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO,
+      ClusterServiceDAO clusterServiceDAO,
+      HostComponentDesiredStateDAO hostComponentDesiredStateDAO,
+      ServiceComponentHostFactory serviceComponentHostFactory, StackDAO stackDAO,
+      AmbariEventPublisher eventPublisher)
+      throws AmbariException {
     this.service = service;
+    this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO;
+    this.clusterServiceDAO = clusterServiceDAO;
+    this.serviceComponentHostFactory = serviceComponentHostFactory;
+    this.stackDAO = stackDAO;
+    this.eventPublisher = eventPublisher;
 
-    desiredStateEntity = serviceComponentDesiredStateEntity;
+    desiredStateEntityId = serviceComponentDesiredStateEntity.getId();
     componentName = serviceComponentDesiredStateEntity.getComponentName();
 
     StackId stackId = service.getDesiredStackVersion();
@@ -153,8 +171,7 @@ public class ServiceComponentImpl implements ServiceComponent {
         + ", stackInfo=" + stackId.getStackId());
     }
 
-    hostComponents = new HashMap<String, ServiceComponentHost>();
-    for (HostComponentStateEntity hostComponentStateEntity : desiredStateEntity.getHostComponentStateEntities()) {
+    for (HostComponentStateEntity hostComponentStateEntity : serviceComponentDesiredStateEntity.getHostComponentStateEntities()) {
       HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
       pk.setClusterId(hostComponentStateEntity.getClusterId());
       pk.setServiceName(hostComponentStateEntity.getServiceName());
@@ -174,13 +191,6 @@ public class ServiceComponentImpl implements ServiceComponent {
         ex.printStackTrace();
       }
     }
-
-    persisted = true;
-  }
-
-  @Override
-  public ReadWriteLock getClusterGlobalLock() {
-    return clusterGlobalLock;
   }
 
   @Override
@@ -195,7 +205,9 @@ public class ServiceComponentImpl implements ServiceComponent {
    */
   @Override
   public boolean isRecoveryEnabled() {
-    ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
     if (desiredStateEntity != null) {
       return desiredStateEntity.isRecoveryEnabled();
     } else {
@@ -213,33 +225,29 @@ public class ServiceComponentImpl implements ServiceComponent {
    */
   @Override
   public void setRecoveryEnabled(boolean recoveryEnabled) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting RecoveryEnabled of Component" + ", clusterName="
-                + service.getCluster().getClusterName() + ", clusterId="
-                + service.getCluster().getClusterId() + ", serviceName="
-                + service.getName() + ", componentName=" + getName()
-                + ", oldRecoveryEnabled=" + isRecoveryEnabled() + ", newRecoveryEnabled="
-                + recoveryEnabled);
-      }
-      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
-      if (desiredStateEntity != null) {
-        desiredStateEntity.setRecoveryEnabled(recoveryEnabled);
-        saveIfPersisted(desiredStateEntity);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting RecoveryEnabled of Component" + ", clusterName="
+          + service.getCluster().getClusterName() + ", clusterId="
+          + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+          + ", componentName=" + getName() + ", oldRecoveryEnabled=" + isRecoveryEnabled()
+          + ", newRecoveryEnabled=" + recoveryEnabled);
+    }
 
-        // broadcast the change
-        ServiceComponentRecoveryChangedEvent event = new ServiceComponentRecoveryChangedEvent(
-                getClusterName(), getServiceName(), getName(), isRecoveryEnabled());
-        eventPublisher.publish(event);
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
 
-      } else {
-        LOG.warn("Setting a member on an entity object that may have been " +
-                "previously deleted, serviceName = " + service.getName());
-      }
+    if (desiredStateEntity != null) {
+      desiredStateEntity.setRecoveryEnabled(recoveryEnabled);
+      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
 
-    } finally {
-      readWriteLock.writeLock().unlock();
+      // broadcast the change
+      ServiceComponentRecoveryChangedEvent event = new ServiceComponentRecoveryChangedEvent(
+          getClusterName(), getServiceName(), getName(), isRecoveryEnabled());
+      eventPublisher.publish(event);
+
+    } else {
+      LOG.warn("Setting a member on an entity object that may have been "
+          + "previously deleted, serviceName = " + service.getName());
     }
   }
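
The new setter shape here (findById, null-check, mutate, then desiredStateEntity = dao.merge(desiredStateEntity)) reassigns on purpose: a JPA-style merge returns the managed copy, and the detached argument passed in must not be reused. A toy model of that contract, with hypothetical State/StateDao types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class State {
  final long id;
  boolean recoveryEnabled;

  State(long id, boolean recoveryEnabled) {
    this.id = id;
    this.recoveryEnabled = recoveryEnabled;
  }
}

class StateDao {
  private final Map<Long, State> managed = new ConcurrentHashMap<Long, State>();

  State findById(long id) {
    State s = managed.get(id);
    return s == null ? null : new State(s.id, s.recoveryEnabled); // detached copy
  }

  State merge(State detached) {
    State copy = new State(detached.id, detached.recoveryEnabled);
    managed.put(copy.id, copy);
    return copy; // the managed instance, like EntityManager.merge
  }

  void seed(State s) {
    managed.put(s.id, s);
  }
}

public class MergeSketch {
  public static void main(String[] args) {
    StateDao dao = new StateDao();
    dao.seed(new State(1L, false));

    State entity = dao.findById(1L);
    if (entity != null) {
      entity.recoveryEnabled = true;
      entity = dao.merge(entity); // keep using the returned managed copy
    }
    System.out.println(dao.findById(1L).recoveryEnabled); // true
  }
}
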
 
@@ -255,151 +263,80 @@ public class ServiceComponentImpl implements ServiceComponent {
 
   @Override
   public Map<String, ServiceComponentHost> getServiceComponentHosts() {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        return new HashMap<String, ServiceComponentHost>(hostComponents);
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
-    }
+    return new HashMap<String, ServiceComponentHost>(hostComponents);
   }
 
   @Override
   public void addServiceComponentHosts(
       Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        for (Entry<String, ServiceComponentHost> entry :
-            hostComponents.entrySet()) {
-          if (!entry.getKey().equals(entry.getValue().getHostName())) {
-            throw new AmbariException("Invalid arguments in map"
-                + ", hostname does not match the key in map");
-          }
-        }
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          addServiceComponentHost(sch);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+    // TODO validation
+    for (Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+      if (!entry.getKey().equals(entry.getValue().getHostName())) {
+        throw new AmbariException(
+            "Invalid arguments in map" + ", hostname does not match the key in map");
       }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
+    }
+
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      addServiceComponentHost(sch);
     }
   }
 
   @Override
   public void addServiceComponentHost(
       ServiceComponentHost hostComponent) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        // TODO ensure host belongs to cluster
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName()
-              + ", recoveryEnabled=" + isRecoveryEnabled());
-        }
-        if (hostComponents.containsKey(hostComponent.getHostName())) {
-          throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName()
-              + ", recoveryEnabled=" + isRecoveryEnabled());
-        }
-        // FIXME need a better approach of caching components by host
-        ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-        clusterImpl.addServiceComponentHost(hostComponent);
-        hostComponents.put(hostComponent.getHostName(), hostComponent);
-      } finally {
-        readWriteLock.writeLock().unlock();
+      // TODO validation
+      // TODO ensure host belongs to cluster
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding a ServiceComponentHost to ServiceComponent" + ", clusterName="
+            + service.getCluster().getClusterName() + ", clusterId="
+            + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+            + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+            + ", recoveryEnabled=" + isRecoveryEnabled());
+      }
+
+      if (hostComponents.containsKey(hostComponent.getHostName())) {
+        throw new AmbariException("Cannot add duplicate ServiceComponentHost" + ", clusterName="
+            + service.getCluster().getClusterName() + ", clusterId="
+            + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+            + ", serviceComponentName=" + getName() + ", hostname=" + hostComponent.getHostName()
+            + ", recoveryEnabled=" + isRecoveryEnabled());
       }
+      // FIXME need a better approach of caching components by host
+      ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+      clusterImpl.addServiceComponentHost(hostComponent);
+      hostComponents.put(hostComponent.getHostName(), hostComponent);
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
   public ServiceComponentHost addServiceComponentHost(String hostName) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        // TODO ensure host belongs to cluster
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + hostName);
-        }
-        if (hostComponents.containsKey(hostName)) {
-          throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-              + ", clusterName=" + service.getCluster().getClusterName()
-              + ", clusterId=" + service.getCluster().getClusterId()
-              + ", serviceName=" + service.getName()
-              + ", serviceComponentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + hostName);
-        }
-        ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
-        // FIXME need a better approach of caching components by host
-        ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-        clusterImpl.addServiceComponentHost(hostComponent);
-
-        hostComponents.put(hostComponent.getHostName(), hostComponent);
-
-        return hostComponent;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
+    addServiceComponentHost(hostComponent);
+    return hostComponent;
   }
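
The String overload of addServiceComponentHost collapses to create-then-delegate, so the duplicate-host check and the cluster caching live in exactly one place. The same cleanup in miniature (illustrative names only):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class DelegationSketch {
  private final Map<String, String> hostComponents = new ConcurrentHashMap<String, String>();

  // Canonical add method: owns all validation.
  public void add(String host, String component) {
    if (hostComponents.containsKey(host)) {
      throw new IllegalArgumentException("duplicate host: " + host);
    }
    hostComponents.put(host, component);
  }

  // Convenience overload: create, then delegate; no copied checks.
  public String add(String host) {
    String component = "component-on-" + host;
    add(host, component);
    return component;
  }

  public static void main(String[] args) {
    DelegationSketch sketch = new DelegationSketch();
    System.out.println(sketch.add("c6401.ambari.apache.org"));
  }
}
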
 
   @Override
   public ServiceComponentHost getServiceComponentHost(String hostname)
       throws AmbariException {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        if (!hostComponents.containsKey(hostname)) {
-          throw new ServiceComponentHostNotFoundException(getClusterName(),
-              getServiceName(), getName(), hostname);
-        }
-        return hostComponents.get(hostname);
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
+
+    if (!hostComponents.containsKey(hostname)) {
+      throw new ServiceComponentHostNotFoundException(getClusterName(),
+          getServiceName(), getName(), hostname);
     }
+
+    return hostComponents.get(hostname);
   }
 
   @Override
   public State getDesiredState() {
-    readWriteLock.readLock().lock();
-    try {
-      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
       if (desiredStateEntity != null) {
         return desiredStateEntity.getDesiredState();
       } else {
@@ -408,124 +345,97 @@ public class ServiceComponentImpl implements ServiceComponent {
           "componentName = " + componentName);
       }
 
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
     return null;
   }
 
   @Override
   public void setDesiredState(State state) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredState of Service" + ", clusterName="
-                + service.getCluster().getClusterName() + ", clusterId="
-                + service.getCluster().getClusterId() + ", serviceName="
-                + service.getName() + ", serviceComponentName=" + getName()
-                + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
-                + state);
-      }
-      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
-      if (desiredStateEntity != null) {
-        desiredStateEntity.setDesiredState(state);
-        saveIfPersisted(desiredStateEntity);
-      } else {
-        LOG.warn("Setting a member on an entity object that may have been " +
-          "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
-      }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredState of Service" + ", clusterName="
+          + service.getCluster().getClusterName() + ", clusterId="
+          + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName() + ", oldDesiredState=" + getDesiredState()
+          + ", newDesiredState=" + state);
+    }
 
-    } finally {
-      readWriteLock.writeLock().unlock();
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
+    if (desiredStateEntity != null) {
+      desiredStateEntity.setDesiredState(state);
+      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
+    } else {
+      LOG.warn("Setting a member on an entity object that may have been "
+          + "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
     }
   }
 
   @Override
   public StackId getDesiredStackVersion() {
-    readWriteLock.readLock().lock();
-    try {
-      StackEntity stackEntity = getDesiredStateEntity().getDesiredStack();
-      if (null != stackEntity) {
-        return new StackId(stackEntity.getStackName(),
-            stackEntity.getStackVersion());
-      } else {
-        return null;
-      }
-    } finally {
-      readWriteLock.readLock().unlock();
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
+    StackEntity stackEntity = desiredStateEntity.getDesiredStack();
+    if (null != stackEntity) {
+      return new StackId(stackEntity.getStackName(), stackEntity.getStackVersion());
+    } else {
+      return null;
     }
   }
 
   @Override
   public void setDesiredStackVersion(StackId stack) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
-            + service.getCluster().getClusterName() + ", clusterId="
-            + service.getCluster().getClusterId() + ", serviceName="
-            + service.getName() + ", serviceComponentName=" + getName()
-            + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-            + ", newDesiredStackVersion=" + stack);
-      }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
+          + service.getCluster().getClusterName() + ", clusterId="
+          + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName() + ", oldDesiredStackVersion="
+          + getDesiredStackVersion() + ", newDesiredStackVersion=" + stack);
+    }
 
-      StackEntity stackEntity = stackDAO.find(stack.getStackName(),
-        stack.getStackVersion());
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
 
-      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
-      if (desiredStateEntity != null) {
-        desiredStateEntity.setDesiredStack(stackEntity);
-        saveIfPersisted(desiredStateEntity);
-      } else {
-        LOG.warn("Setting a member on an entity object that may have been " +
-          "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (desiredStateEntity != null) {
+      StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
+      desiredStateEntity.setDesiredStack(stackEntity);
+      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
+    } else {
+      LOG.warn("Setting a member on an entity object that may have been "
+          + "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
     }
   }
 
   @Override
   public String getDesiredVersion() {
-    readWriteLock.readLock().lock();
-    try {
-      return getDesiredStateEntity().getDesiredVersion();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
+    return desiredStateEntity.getDesiredVersion();
   }
 
   @Override
   public void setDesiredVersion(String version) {
-    readWriteLock.writeLock().lock();
-    try {
-      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+    ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
       if (desiredStateEntity != null) {
         desiredStateEntity.setDesiredVersion(version);
-        saveIfPersisted(desiredStateEntity);
+      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
       } else {
         LOG.warn("Setting a member on an entity object that may have been " +
           "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
       }
-
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
   }
 
   @Override
   public ServiceComponentResponse convertToResponse() {
-    readWriteLock.readLock().lock();
-    try {
-      Cluster cluster = service.getCluster();
-      ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
-          cluster.getClusterName(), service.getName(), getName(),
-          getDesiredStackVersion().getStackId(), getDesiredState().toString(),
-          getServiceComponentStateCount(), isRecoveryEnabled(), displayName);
-      return r;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    Cluster cluster = service.getCluster();
+    ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
+        cluster.getClusterName(), service.getName(), getName(),
+        getDesiredStackVersion().getStackId(), getDesiredState().toString(),
+        getServiceComponentStateCount(), isRecoveryEnabled(), displayName);
+    return r;
   }
 
   @Override
@@ -536,124 +446,36 @@ public class ServiceComponentImpl implements ServiceComponent {
 
   @Override
   public void debugDump(StringBuilder sb) {
-    readWriteLock.readLock().lock();
-    try {
-      sb.append("ServiceComponent={ serviceComponentName=" + getName()
-          + ", recoveryEnabled=" + isRecoveryEnabled()
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName() + ", desiredStackVersion="
-          + getDesiredStackVersion() + ", desiredState="
-          + getDesiredState().toString() + ", hostcomponents=[ ");
-      boolean first = true;
-      for (ServiceComponentHost sch : hostComponents.values()) {
-        if (!first) {
-          sb.append(" , ");
-          first = false;
-        }
-        sb.append("\n        ");
-        sch.debugDump(sb);
-        sb.append(" ");
-      }
-      sb.append(" ] }");
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean isPersisted() {
-    // a lock around this internal state variable is not required since we
-    // have appropriate locks in the persist() method and this member is
-    // only ever false under the condition that the object is new
-    return persisted;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p/>
-   * This method uses Java locks and then delegates to internal methods which
-   * perform the JPA merges inside of a transaction. Because of this, a
-   * transaction is not necessary before this calling this method.
-   */
-  @Override
-  public void persist() {
-    boolean clusterWriteLockAcquired = false;
-    if (!persisted) {
-      clusterGlobalLock.writeLock().lock();
-      clusterWriteLockAcquired = true;
-    }
-
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!persisted) {
-          // persist the new cluster topology and then release the cluster lock
-          // as it has no more bearing on the rest of this persist() method
-          persistEntities();
-          clusterGlobalLock.writeLock().unlock();
-          clusterWriteLockAcquired = false;
-
-          refresh();
-          // There refresh calls are no longer needed with cached references
-          // not used on getters/setters
-          // service.refresh();
-          persisted = true;
-        } else {
-          saveIfPersisted(desiredStateEntity);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      if (clusterWriteLockAcquired) {
-        clusterGlobalLock.writeLock().unlock();
+    sb.append("ServiceComponent={ serviceComponentName=" + getName() + ", recoveryEnabled="
+        + isRecoveryEnabled() + ", clusterName=" + service.getCluster().getClusterName()
+        + ", clusterId=" + service.getCluster().getClusterId() + ", serviceName="
+        + service.getName() + ", desiredStackVersion=" + getDesiredStackVersion()
+        + ", desiredState=" + getDesiredState().toString() + ", hostcomponents=[ ");
+    boolean first = true;
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      if (!first) {
+        sb.append(" , ");
+        first = false;
       }
+      sb.append("\n        ");
+      sch.debugDump(sb);
+      sb.append(" ");
     }
+    sb.append(" ] }");
   }
 
   @Transactional
-  protected void persistEntities() {
+  protected void persistEntities(ServiceComponentDesiredStateEntity desiredStateEntity) {
     ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
     pk.setClusterId(service.getClusterId());
     pk.setServiceName(service.getName());
     ClusterServiceEntity serviceEntity = clusterServiceDAO.findByPK(pk);
 
-    ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
     desiredStateEntity.setClusterServiceEntity(serviceEntity);
-
     serviceComponentDesiredStateDAO.create(desiredStateEntity);
     serviceEntity = clusterServiceDAO.merge(serviceEntity);
   }
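
persistEntities now receives the fully built ServiceComponentDesiredStateEntity as a parameter, so the create and the parent merge run together inside the @Transactional boundary instead of reaching back into mutable instance state. A rough sketch of that "build outside, persist inside one transaction" split, with a hypothetical TransactionRunner in place of Guice's @Transactional interceptor:

import java.util.ArrayList;
import java.util.List;

interface TransactionRunner {
  void inTransaction(Runnable work);
}

class LoggingRunner implements TransactionRunner {
  @Override
  public void inTransaction(Runnable work) {
    System.out.println("BEGIN");
    try {
      work.run();
      System.out.println("COMMIT");
    } catch (RuntimeException e) {
      System.out.println("ROLLBACK");
      throw e;
    }
  }
}

public class PersistEntitiesSketch {
  private final List<String> createdEntities = new ArrayList<String>();
  private final TransactionRunner txn = new LoggingRunner();

  void persistEntities(String desiredStateEntity) {
    txn.inTransaction(() -> {
      createdEntities.add(desiredStateEntity); // stands in for dao.create(...)
      // the parent merge happens here too, in the same transaction
    });
  }

  public static void main(String[] args) {
    new PersistEntitiesSketch().persistEntities("component-desired-state");
  }
}
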
 
-  @Override
-  @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted()) {
-        serviceComponentDesiredStateDAO.refresh(getDesiredStateEntity());
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Merges the encapsulated {@link ServiceComponentDesiredStateEntity} inside
-   * of a new transaction. This method assumes that the appropriate write lock
-   * has already been acquired from {@link #readWriteLock}.
-   */
-  @Transactional
-  void saveIfPersisted(ServiceComponentDesiredStateEntity desiredStateEntity) {
-    if (isPersisted()) {
-      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
-    }
-  }
-
   @Override
   public boolean isClientComponent() {
     return isClientComponent;
@@ -672,129 +494,89 @@ public class ServiceComponentImpl implements ServiceComponent {
 
   @Override
   public boolean canBeRemoved() {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        // A component can be deleted if all its host components
-        // can be removed, irrespective of the state of
-        // the component itself
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          if (!sch.canBeRemoved()) {
-            LOG.warn("Found non removable hostcomponent when trying to"
-                + " delete service component"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + getServiceName()
-                + ", componentName=" + getName()
-                + ", state=" + sch.getState()
-                + ", hostname=" + sch.getHostName());
-            return false;
-          }
-        }
-        return true;
-      } finally {
-        readWriteLock.readLock().unlock();
+    // A component can be deleted if all its host components
+    // can be removed, irrespective of the state of
+    // the component itself
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      if (!sch.canBeRemoved()) {
+        LOG.warn("Found non removable hostcomponent when trying to" + " delete service component"
+            + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName()
+            + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname="
+            + sch.getHostName());
+        return false;
       }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
     }
+    return true;
   }
 
   @Override
   @Transactional
   public void deleteAllServiceComponentHosts() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        LOG.info("Deleting all servicecomponenthosts for component"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName()
-            + ", recoveryEnabled=" + isRecoveryEnabled());
-        for (ServiceComponentHost sch : hostComponents.values()) {
-          if (!sch.canBeRemoved()) {
-            throw new AmbariException("Found non removable hostcomponent "
-                + " when trying to delete"
-                + " all hostcomponents from servicecomponent"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + getServiceName()
-                + ", componentName=" + getName()
-                + ", recoveryEnabled=" + isRecoveryEnabled()
-                + ", hostname=" + sch.getHostName());
-          }
-        }
-
-        for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
-          serviceComponentHost.delete();
+      LOG.info("Deleting all servicecomponenthosts for component" + ", clusterName="
+          + getClusterName() + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+          + ", recoveryEnabled=" + isRecoveryEnabled());
+      for (ServiceComponentHost sch : hostComponents.values()) {
+        if (!sch.canBeRemoved()) {
+          throw new AmbariException("Found non removable hostcomponent " + " when trying to delete"
+              + " all hostcomponents from servicecomponent" + ", clusterName=" + getClusterName()
+              + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+              + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
         }
+      }
 
-        hostComponents.clear();
-      } finally {
-        readWriteLock.writeLock().unlock();
+      for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
+        serviceComponentHost.delete();
       }
+
+      hostComponents.clear();
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
   public void deleteServiceComponentHosts(String hostname) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ServiceComponentHost sch = getServiceComponentHost(hostname);
-        LOG.info("Deleting servicecomponenthost for cluster"
+      ServiceComponentHost sch = getServiceComponentHost(hostname);
+      LOG.info("Deleting servicecomponenthost for cluster" + ", clusterName=" + getClusterName()
+          + ", serviceName=" + getServiceName() + ", componentName=" + getName()
+          + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName());
+      if (!sch.canBeRemoved()) {
+        throw new AmbariException("Could not delete hostcomponent from cluster"
             + ", clusterName=" + getClusterName()
             + ", serviceName=" + getServiceName()
             + ", componentName=" + getName()
             + ", recoveryEnabled=" + isRecoveryEnabled()
             + ", hostname=" + sch.getHostName());
-        if (!sch.canBeRemoved()) {
-          throw new AmbariException("Could not delete hostcomponent from cluster"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + getServiceName()
-              + ", componentName=" + getName()
-              + ", recoveryEnabled=" + isRecoveryEnabled()
-              + ", hostname=" + sch.getHostName());
-        }
-        sch.delete();
-        hostComponents.remove(hostname);
-
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
+      sch.delete();
+      hostComponents.remove(hostname);
+
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
   @Override
   @Transactional
   public void delete() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        deleteAllServiceComponentHosts();
+      deleteAllServiceComponentHosts();
+
+      ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
+          desiredStateEntityId);
+
+      serviceComponentDesiredStateDAO.remove(desiredStateEntity);
 
-        if (persisted) {
-          removeEntities();
-          persisted = false;
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
-  @Transactional
-  protected void removeEntities() throws AmbariException {
-    serviceComponentDesiredStateDAO.remove(getDesiredStateEntity());
-  }
 
   private int getSCHCountByState(State state) {
     int count = 0;
@@ -816,13 +598,4 @@ public class ServiceComponentImpl implements ServiceComponent {
     serviceComponentStateCountMap.put("totalCount", hostComponents.size());
     return serviceComponentStateCountMap;
   }
-
-  // Refresh cached reference after ever setter
-  private ServiceComponentDesiredStateEntity getDesiredStateEntity() {
-    if (!isPersisted()) {
-      return desiredStateEntity;
-    }
-
-    return serviceComponentDesiredStateDAO.findById(desiredStateEntity.getId());
-  }
 }
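
Taken together, the ServiceComponentImpl rewrite replaces injector.injectMembers(this) plus mutable @Inject fields with @AssistedInject constructors whose dependencies can all be final. A compilable sketch of that pattern, assuming Guice and its assistedinject extension on the classpath; the Greeter/GreeterFactory names are invented for illustration:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
import com.google.inject.assistedinject.FactoryModuleBuilder;

interface GreeterFactory {
  Greeter create(String name);
}

class Greeting {
  String prefix() {
    return "Hello, ";
  }
}

class Greeter {
  private final Greeting greeting; // injected by Guice, final
  private final String name;       // caller-supplied (assisted), final

  @AssistedInject
  Greeter(@Assisted String name, Greeting greeting) {
    this.name = name;
    this.greeting = greeting;
  }

  String greet() {
    return greeting.prefix() + name;
  }
}

public class AssistedSketch {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        install(new FactoryModuleBuilder().build(GreeterFactory.class));
      }
    });
    GreeterFactory factory = injector.getInstance(GreeterFactory.class);
    System.out.println(factory.create("Ambari").greet());
  }
}

With every collaborator supplied up front, there is no partially initialized phase for other threads to observe.
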

+ 215 - 427
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java

@@ -18,12 +18,15 @@
 
 package org.apache.ambari.server.state;
 
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -34,7 +37,6 @@ import org.apache.ambari.server.events.ServiceRemovedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
-import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
@@ -43,7 +45,6 @@ import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
@@ -51,111 +52,107 @@ import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.inject.Inject;
+import com.google.inject.ProvisionException;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 
 
 public class ServiceImpl implements Service {
-  private final ReadWriteLock clusterGlobalLock;
-  private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-  // Cached entity has only 1 getter for name
-  private ClusterServiceEntity serviceEntity;
-  private ServiceDesiredStateEntity serviceDesiredStateEntity;
+  private final Lock lock = new ReentrantLock();
   private ServiceDesiredStateEntityPK serviceDesiredStateEntityPK;
   private ClusterServiceEntityPK serviceEntityPK;
 
   private static final Logger LOG = LoggerFactory.getLogger(ServiceImpl.class);
 
-  private volatile boolean persisted = false;
   private final Cluster cluster;
-  private Map<String, ServiceComponent> components;
+  private final ConcurrentMap<String, ServiceComponent> components = new ConcurrentHashMap<>();
   private final boolean isClientOnlyService;
 
   @Inject
   private ServiceConfigDAO serviceConfigDAO;
-  @Inject
-  private ClusterServiceDAO clusterServiceDAO;
-  @Inject
-  private ServiceDesiredStateDAO serviceDesiredStateDAO;
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  private ServiceComponentFactory serviceComponentFactory;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  private ConfigGroupDAO configGroupDAO;
+
+  private final ClusterServiceDAO clusterServiceDAO;
+  private final ServiceDesiredStateDAO serviceDesiredStateDAO;
+  private final ClusterDAO clusterDAO;
+  private final ServiceComponentFactory serviceComponentFactory;
 
   /**
    * Data access object for retrieving stack instances.
    */
-  @Inject
-  private StackDAO stackDAO;
+  private final StackDAO stackDAO;
 
   /**
    * Used to publish events relating to service CRUD operations.
    */
-  @Inject
-  private AmbariEventPublisher eventPublisher;
+  private final AmbariEventPublisher eventPublisher;
 
-  private void init() {
-    // TODO load from DB during restart?
-  }
+  /**
+   * The name of the service.
+   */
+  private final String serviceName;
 
   @AssistedInject
-  public ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
-      Injector injector) throws AmbariException {
-    injector.injectMembers(this);
-    clusterGlobalLock = cluster.getClusterGlobalLock();
-    serviceEntity = new ClusterServiceEntity();
+  ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName, ClusterDAO clusterDAO,
+      ClusterServiceDAO clusterServiceDAO, ServiceDesiredStateDAO serviceDesiredStateDAO,
+      ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
+      AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
+      throws AmbariException {
+    this.cluster = cluster;
+    this.clusterDAO = clusterDAO;
+    this.clusterServiceDAO = clusterServiceDAO;
+    this.serviceDesiredStateDAO = serviceDesiredStateDAO;
+    this.serviceComponentFactory = serviceComponentFactory;
+    this.stackDAO = stackDAO;
+    this.eventPublisher = eventPublisher;
+    this.serviceName = serviceName;
+
+    ClusterServiceEntity serviceEntity = new ClusterServiceEntity();
     serviceEntity.setClusterId(cluster.getClusterId());
     serviceEntity.setServiceName(serviceName);
-    serviceDesiredStateEntity = new ServiceDesiredStateEntity();
+    ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
     serviceDesiredStateEntity.setServiceName(serviceName);
     serviceDesiredStateEntity.setClusterId(cluster.getClusterId());
-
     serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity);
     serviceEntityPK = getServiceEntityPK(serviceEntity);
 
     serviceDesiredStateEntity.setClusterServiceEntity(serviceEntity);
     serviceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
 
-    this.cluster = cluster;
-
-    components = new HashMap<String, ServiceComponent>();
-
     StackId stackId = cluster.getDesiredStackVersion();
-    setDesiredStackVersion(stackId);
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+    serviceDesiredStateEntity.setDesiredStack(stackEntity);
 
     ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
         stackId.getStackVersion(), serviceName);
+
     isClientOnlyService = sInfo.isClientOnlyService();
 
-    init();
+    persist(serviceEntity);
   }
 
   @AssistedInject
-  public ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity
-      serviceEntity, Injector injector) throws AmbariException {
-    injector.injectMembers(this);
-    clusterGlobalLock = cluster.getClusterGlobalLock();
-    this.serviceEntity = serviceEntity;
+  ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity serviceEntity,
+      ClusterDAO clusterDAO, ClusterServiceDAO clusterServiceDAO,
+      ServiceDesiredStateDAO serviceDesiredStateDAO,
+      ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
+      AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
+      throws AmbariException {
     this.cluster = cluster;
-
-    //TODO check for null states?
-    serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
+    this.clusterDAO = clusterDAO;
+    this.clusterServiceDAO = clusterServiceDAO;
+    this.serviceDesiredStateDAO = serviceDesiredStateDAO;
+    this.serviceComponentFactory = serviceComponentFactory;
+    this.stackDAO = stackDAO;
+    this.eventPublisher = eventPublisher;
+    serviceName = serviceEntity.getServiceName();
+
+    ServiceDesiredStateEntity serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
     serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity);
     serviceEntityPK = getServiceEntityPK(serviceEntity);
 
-    components = new HashMap<String, ServiceComponent>();
-
     if (!serviceEntity.getServiceComponentDesiredStateEntities().isEmpty()) {
       for (ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity
           : serviceEntity.getServiceComponentDesiredStateEntities()) {
@@ -177,18 +174,11 @@ public class ServiceImpl implements Service {
     ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
         stackId.getStackVersion(), getName());
     isClientOnlyService = sInfo.isClientOnlyService();
-
-    persisted = true;
-  }
-
-  @Override
-  public ReadWriteLock getClusterGlobalLock() {
-    return clusterGlobalLock;
   }
 
   @Override
   public String getName() {
-    return serviceEntity.getServiceName();
+    return serviceName;
   }
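
getName() now returns a final serviceName field instead of touching the entity: the rewrite caches only values that can never change and re-reads everything mutable through a DAO. In sketch form (hypothetical class):

public class NameCacheSketch {
  private final String serviceName; // immutable, so safe to cache

  NameCacheSketch(String serviceName) {
    this.serviceName = serviceName;
  }

  public String getName() {
    return serviceName; // no entity lookup needed
  }

  public static void main(String[] args) {
    System.out.println(new NameCacheSketch("HDFS").getName());
  }
}
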
 
   @Override
@@ -198,145 +188,75 @@ public class ServiceImpl implements Service {
 
   @Override
   public Map<String, ServiceComponent> getServiceComponents() {
-    readWriteLock.readLock().lock();
-    try {
-      return new HashMap<String, ServiceComponent>(components);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    return new HashMap<String, ServiceComponent>(components);
   }
 
   @Override
   public void addServiceComponents(
       Map<String, ServiceComponent> components) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        for (ServiceComponent sc : components.values()) {
-          addServiceComponent(sc);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
+    for (ServiceComponent sc : components.values()) {
+      addServiceComponent(sc);
     }
   }
 
   @Override
   public void addServiceComponent(ServiceComponent component) throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        // TODO validation
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponent to Service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + component.getName());
-        }
-        if (components.containsKey(component.getName())) {
-          throw new AmbariException("Cannot add duplicate ServiceComponent"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + component.getName());
-        }
-        components.put(component.getName(), component);
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
+    if (components.containsKey(component.getName())) {
+      throw new AmbariException("Cannot add duplicate ServiceComponent"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + component.getName());
     }
+
+    components.put(component.getName(), component);
   }
 
   @Override
   public ServiceComponent addServiceComponent(String serviceComponentName)
       throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding a ServiceComponent to Service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + serviceComponentName);
-        }
-        if (components.containsKey(serviceComponentName)) {
-          throw new AmbariException("Cannot add duplicate ServiceComponent"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + getName()
-              + ", serviceComponentName=" + serviceComponentName);
-        }
-        ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
-        components.put(component.getName(), component);
-        return component;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
+    addServiceComponent(component);
+    return component;
   }
 
   @Override
   public ServiceComponent getServiceComponent(String componentName)
       throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      if (!components.containsKey(componentName)) {
-        throw new ServiceComponentNotFoundException(cluster.getClusterName(),
-            getName(), componentName);
-      }
-      return components.get(componentName);
-    } finally {
-      readWriteLock.readLock().unlock();
+    ServiceComponent serviceComponent = components.get(componentName);
+    if (null == serviceComponent) {
+      throw new ServiceComponentNotFoundException(cluster.getClusterName(),
+          getName(), componentName);
     }
+
+    return serviceComponent;
   }
 
   @Override
   public State getDesiredState() {
-    readWriteLock.readLock().lock();
-    try {
-      return getServiceDesiredStateEntity().getDesiredState();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    return serviceDesiredStateEntity.getDesiredState();
   }
 
   @Override
   public void setDesiredState(State state) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredState of Service" + ", clusterName="
-            + cluster.getClusterName() + ", clusterId="
-            + cluster.getClusterId() + ", serviceName=" + getName()
-            + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
-            + state + ", persisted = " + isPersisted());
-      }
-      getServiceDesiredStateEntity().setDesiredState(state);
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredState of Service" + ", clusterName="
+          + cluster.getClusterName() + ", clusterId="
+          + cluster.getClusterId() + ", serviceName=" + getName()
+          + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
+          + state);
     }
+
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    serviceDesiredStateEntity.setDesiredState(state);
+    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
   }
 
   @Override
   public SecurityState getSecurityState() {
-    readWriteLock.readLock().lock();
-    try {
-      return getServiceDesiredStateEntity().getSecurityState();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    return serviceDesiredStateEntity.getSecurityState();
   }
 
   @Override
@@ -345,70 +265,52 @@ public class ServiceImpl implements Service {
       throw new AmbariException("The security state must be an endpoint state");
     }
 
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredSecurityState of Service" + ", clusterName="
-            + cluster.getClusterName() + ", clusterId="
-            + cluster.getClusterId() + ", serviceName=" + getName()
-            + ", oldDesiredSecurityState=" + getSecurityState()
-            + ", newDesiredSecurityState=" + securityState);
-      }
-      getServiceDesiredStateEntity().setSecurityState(securityState);
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredSecurityState of Service" + ", clusterName="
+          + cluster.getClusterName() + ", clusterId="
+          + cluster.getClusterId() + ", serviceName=" + getName()
+          + ", oldDesiredSecurityState=" + getSecurityState()
+          + ", newDesiredSecurityState=" + securityState);
     }
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    serviceDesiredStateEntity.setSecurityState(securityState);
+    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
   }
 
   @Override
   public StackId getDesiredStackVersion() {
-    readWriteLock.readLock().lock();
-    try {
-      StackEntity desiredStackEntity = getServiceDesiredStateEntity().getDesiredStack();
-      if( null != desiredStackEntity ) {
-        return new StackId(desiredStackEntity);
-      } else {
-        return null;
-      }
-    } finally {
-      readWriteLock.readLock().unlock();
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    StackEntity desiredStackEntity = serviceDesiredStateEntity.getDesiredStack();
+    if( null != desiredStackEntity ) {
+      return new StackId(desiredStackEntity);
+    } else {
+      return null;
     }
   }
 
   @Override
   public void setDesiredStackVersion(StackId stack) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
-            + cluster.getClusterName() + ", clusterId="
-            + cluster.getClusterId() + ", serviceName=" + getName()
-            + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-            + ", newDesiredStackVersion=" + stack);
-      }
-
-      StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
-      getServiceDesiredStateEntity().setDesiredStack(stackEntity);
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
+          + cluster.getClusterName() + ", clusterId="
+          + cluster.getClusterId() + ", serviceName=" + getName()
+          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
+          + ", newDesiredStackVersion=" + stack);
     }
+
+    StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    serviceDesiredStateEntity.setDesiredStack(stackEntity);
+    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
   }
 
   @Override
   public ServiceResponse convertToResponse() {
-    readWriteLock.readLock().lock();
-    try {
-      ServiceResponse r = new ServiceResponse(cluster.getClusterId(),
-          cluster.getClusterName(), getName(),
-          getDesiredStackVersion().getStackId(), getDesiredState().toString());
+    ServiceResponse r = new ServiceResponse(cluster.getClusterId(), cluster.getClusterName(),
+        getName(), getDesiredStackVersion().getStackId(), getDesiredState().toString());
 
-      r.setMaintenanceState(getMaintenanceState().name());
-      return r;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
+    r.setMaintenanceState(getMaintenanceState().name());
+    return r;
   }
 
   @Override
@@ -418,149 +320,77 @@ public class ServiceImpl implements Service {
 
   @Override
   public void debugDump(StringBuilder sb) {
-    readWriteLock.readLock().lock();
-    try {
-      sb.append("Service={ serviceName=" + getName() + ", clusterName="
-          + cluster.getClusterName() + ", clusterId=" + cluster.getClusterId()
-          + ", desiredStackVersion=" + getDesiredStackVersion()
-          + ", desiredState=" + getDesiredState().toString()
-          + ", components=[ ");
-      boolean first = true;
-      for (ServiceComponent sc : components.values()) {
-        if (!first) {
-          sb.append(" , ");
-        }
-        first = false;
-        sb.append("\n      ");
-        sc.debugDump(sb);
-        sb.append(" ");
+    sb.append("Service={ serviceName=" + getName() + ", clusterName=" + cluster.getClusterName()
+        + ", clusterId=" + cluster.getClusterId() + ", desiredStackVersion="
+        + getDesiredStackVersion() + ", desiredState=" + getDesiredState().toString()
+        + ", components=[ ");
+    boolean first = true;
+    for (ServiceComponent sc : components.values()) {
+      if (!first) {
+        sb.append(" , ");
       }
-      sb.append(" ] }");
-    } finally {
-      readWriteLock.readLock().unlock();
+      first = false;
+      sb.append("\n      ");
+      sc.debugDump(sb);
+      sb.append(" ");
     }
+    sb.append(" ] }");
   }
 
   /**
-   * {@inheritDoc}
+   * Persists the given service entity and publishes a {@link ServiceInstalledEvent}.
    */
-  @Override
-  public boolean isPersisted() {
-    // a lock around this internal state variable is not required since we
-    // have appropriate locks in the persist() method and this member is
-    // only ever false under the condition that the object is new
-    return persisted;
-  }
+  private void persist(ClusterServiceEntity serviceEntity) {
+    persistEntities(serviceEntity);
+    refresh();
 
-  /**
-   * {@inheritDoc}
-   * <p/>
-   * This method uses Java locks and then delegates to internal methods which
-   * perform the JPA merges inside of a transaction. Because of this, a
-   * transaction is not necessary before this calling this method.
-   */
-  @Override
-  public void persist() {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!persisted) {
-          persistEntities();
-          refresh();
-          // There refresh calls are no longer needed with cached references
-          // not used on getters/setters
-          // cluster.refresh();
-          persisted = true;
-
-          // publish the service installed event
-          StackId stackId = cluster.getDesiredStackVersion();
-          cluster.addService(this);
-
-          ServiceInstalledEvent event = new ServiceInstalledEvent(
-              getClusterId(), stackId.getStackName(),
-              stackId.getStackVersion(), getName());
-
-          eventPublisher.publish(event);
-        } else {
-          saveIfPersisted();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    // publish the service installed event
+    StackId stackId = cluster.getDesiredStackVersion();
+    cluster.addService(this);
+
+    ServiceInstalledEvent event = new ServiceInstalledEvent(getClusterId(), stackId.getStackName(),
+        stackId.getStackVersion(), getName());
+
+    eventPublisher.publish(event);
   }
 
   @Transactional
-  protected void persistEntities() {
+  private void persistEntities(ClusterServiceEntity serviceEntity) {
     long clusterId = cluster.getClusterId();
-
     ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
     serviceEntity.setClusterEntity(clusterEntity);
     clusterServiceDAO.create(serviceEntity);
-    serviceDesiredStateDAO.create(serviceDesiredStateEntity);
     clusterEntity.getClusterServiceEntities().add(serviceEntity);
     clusterDAO.merge(clusterEntity);
     clusterServiceDAO.merge(serviceEntity);
-    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
   }
 
-  @Transactional
-  void saveIfPersisted() {
-    if (isPersisted()) {
-      clusterServiceDAO.merge(serviceEntity);
-      serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
-    }
-  }
-
-  @Override
   @Transactional
   public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted()) {
-        ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
-        pk.setClusterId(getClusterId());
-        pk.setServiceName(getName());
-        serviceEntity = clusterServiceDAO.findByPK(pk);
-        serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
-        clusterServiceDAO.refresh(serviceEntity);
-        serviceDesiredStateDAO.refresh(serviceDesiredStateEntity);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    ClusterServiceEntity serviceEntity = getServiceEntity();
+    clusterServiceDAO.refresh(serviceEntity);
+    serviceDesiredStateDAO.refresh(serviceEntity.getServiceDesiredStateEntity());
   }
 
   @Override
   public boolean canBeRemoved() {
-    clusterGlobalLock.readLock().lock();
-    try {
-      readWriteLock.readLock().lock();
-      try {
-        //
-        // A service can be deleted if all it's components
-        // can be removed, irrespective of the state of
-        // the service itself.
-        //
-        for (ServiceComponent sc : components.values()) {
-          if (!sc.canBeRemoved()) {
-            LOG.warn("Found non removable component when trying to delete service"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", serviceName=" + getName()
-                + ", componentName=" + sc.getName());
-            return false;
-          }
-        }
-        return true;
-      } finally {
-        readWriteLock.readLock().unlock();
+    //
+    // A service can be deleted if all its components
+    // can be removed, irrespective of the state of
+    // the service itself.
+    //
+    for (ServiceComponent sc : components.values()) {
+      if (!sc.canBeRemoved()) {
+        LOG.warn("Found non removable component when trying to delete service" + ", clusterName="
+            + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName="
+            + sc.getName());
+        return false;
       }
-    } finally {
-      clusterGlobalLock.readLock().unlock();
     }
+    return true;
   }
 
   @Transactional
@@ -599,71 +429,54 @@ public class ServiceImpl implements Service {
       serviceConfigDAO.remove(serviceConfigEntity);
     }
   }
-  
+
   @Override
   @Transactional
   public void deleteAllComponents() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    lock.lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        LOG.info("Deleting all components for service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + getName());
-        // FIXME check dependencies from meta layer
-        for (ServiceComponent component : components.values()) {
-          if (!component.canBeRemoved()) {
-            throw new AmbariException("Found non removable component when trying to"
-                + " delete all components from service"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", serviceName=" + getName()
-                + ", componentName=" + component.getName());
-          }
-        }
-
-        for (ServiceComponent serviceComponent : components.values()) {
-          serviceComponent.delete();
+      LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName()
+          + ", serviceName=" + getName());
+      // FIXME check dependencies from meta layer
+      for (ServiceComponent component : components.values()) {
+        if (!component.canBeRemoved()) {
+          throw new AmbariException("Found non removable component when trying to"
+              + " delete all components from service" + ", clusterName=" + cluster.getClusterName()
+              + ", serviceName=" + getName() + ", componentName=" + component.getName());
         }
+      }
 
-        components.clear();
-      } finally {
-        readWriteLock.writeLock().unlock();
+      for (ServiceComponent serviceComponent : components.values()) {
+        serviceComponent.delete();
       }
+
+      components.clear();
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      lock.unlock();
     }
   }
 
   @Override
   public void deleteServiceComponent(String componentName)
       throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
+    lock.lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        ServiceComponent component = getServiceComponent(componentName);
-        LOG.info("Deleting servicecomponent for cluster"
+      ServiceComponent component = getServiceComponent(componentName);
+      LOG.info("Deleting servicecomponent for cluster" + ", clusterName=" + cluster.getClusterName()
+          + ", serviceName=" + getName() + ", componentName=" + componentName);
+      // FIXME check dependencies from meta layer
+      if (!component.canBeRemoved()) {
+        throw new AmbariException("Could not delete component from cluster"
             + ", clusterName=" + cluster.getClusterName()
             + ", serviceName=" + getName()
             + ", componentName=" + componentName);
-        // FIXME check dependencies from meta layer
-        if (!component.canBeRemoved()) {
-          throw new AmbariException("Could not delete component from cluster"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", serviceName=" + getName()
-              + ", componentName=" + componentName);
-        }
-
-        component.delete();
-        components.remove(componentName);
-      } finally {
-        readWriteLock.writeLock().unlock();
       }
+
+      component.delete();
+      components.remove(componentName);
     } finally {
-      clusterGlobalLock.writeLock().unlock();
+      lock.unlock();
     }
-
-
   }
 
   @Override
@@ -674,33 +487,18 @@ public class ServiceImpl implements Service {
   @Override
   @Transactional
   public void delete() throws AmbariException {
-    clusterGlobalLock.writeLock().lock();
-    try {
-      readWriteLock.writeLock().lock();
-      try {
-        deleteAllComponents();
-        deleteAllServiceConfigs();
-
-        if (persisted) {
-          removeEntities();
-          persisted = false;
-
-          // publish the service removed event
-          StackId stackId = cluster.getDesiredStackVersion();
+    deleteAllComponents();
+    deleteAllServiceConfigs();
 
-          ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(),
-              stackId.getStackName(), stackId.getStackVersion(), getName());
+    removeEntities();
 
-          eventPublisher.publish(event);
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
+    // publish the service removed event
+    StackId stackId = cluster.getDesiredStackVersion();
 
+    ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(),
+        stackId.getStackVersion(), getName());
 
+    eventPublisher.publish(event);
   }
 
   @Transactional
@@ -716,17 +514,13 @@ public class ServiceImpl implements Service {
 
   @Override
   public void setMaintenanceState(MaintenanceState state) {
-    readWriteLock.writeLock().lock();
-    try {
-      getServiceDesiredStateEntity().setMaintenanceState(state);
-      saveIfPersisted();
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    serviceDesiredStateEntity.setMaintenanceState(state);
+    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
 
-      // broadcast the maintenance mode change
-      MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
-      eventPublisher.publish(event);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
+    // broadcast the maintenance mode change
+    MaintenanceModeEvent event = new MaintenanceModeEvent(state, this);
+    eventPublisher.publish(event);
   }
 
   @Override
@@ -735,10 +529,7 @@ public class ServiceImpl implements Service {
   }
 
   private ClusterServiceEntity getServiceEntity() {
-    if (isPersisted()) {
-      serviceEntity = clusterServiceDAO.findByPK(serviceEntityPK);
-    }
-    return serviceEntity;
+    return clusterServiceDAO.findByPK(serviceEntityPK);
   }
 
   private ClusterServiceEntityPK getServiceEntityPK(ClusterServiceEntity serviceEntity) {
@@ -757,9 +548,6 @@ public class ServiceImpl implements Service {
 
   // Refresh the cached reference on setters
   private ServiceDesiredStateEntity getServiceDesiredStateEntity() {
-    if (isPersisted()) {
-      serviceDesiredStateEntity = serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
-    }
-    return serviceDesiredStateEntity;
+    return serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
   }
 }
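
The ServiceImpl rewrite above drops the cached serviceEntity/serviceDesiredStateEntity fields and the persisted flag in favor of a read-through pattern: getters re-fetch their entity by primary key and setters merge the change straight back through the DAO. A minimal sketch of that DAO shape, assuming guice-persist style wiring (the Provider<EntityManager> plumbing below is an assumption for illustration; Ambari's real DAOs add session management):

import javax.persistence.EntityManager;

import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;

import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import com.google.inject.persist.Transactional;

@Singleton
public class ServiceDesiredStateDAOSketch {

  @Inject
  private Provider<EntityManager> entityManagerProvider;

  // Read-through: always load the current row instead of caching it on the
  // business object, so a writer's merge is visible to every later getter.
  public ServiceDesiredStateEntity findByPK(ServiceDesiredStateEntityPK pk) {
    return entityManagerProvider.get().find(ServiceDesiredStateEntity.class, pk);
  }

  // Setters merge immediately; the in-memory object is never left dirty.
  @Transactional
  public ServiceDesiredStateEntity merge(ServiceDesiredStateEntity entity) {
    return entityManagerProvider.get().merge(entity);
  }
}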

The file's diff data was not displayed because the file is too large.
+ 286 - 477
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java


+ 252 - 376
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java

@@ -28,8 +28,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import javax.persistence.RollbackException;
 
@@ -103,18 +102,12 @@ public class ClustersImpl implements Clusters {
   private static final Logger LOG = LoggerFactory.getLogger(
       ClustersImpl.class);
 
-  private ConcurrentHashMap<String, Cluster> clusters;
-  private ConcurrentHashMap<Long, Cluster> clustersById;
-  private ConcurrentHashMap<String, Host> hosts;
-  private ConcurrentHashMap<Long, Host> hostsById;
-  private ConcurrentHashMap<String, Set<Cluster>> hostClusterMap;
-  private ConcurrentHashMap<String, Set<Host>> clusterHostMap;
-
-  private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
-  private final Lock r = rwl.readLock();
-  private final Lock w = rwl.writeLock();
-
-  private volatile boolean clustersLoaded = false;
+  private final ConcurrentHashMap<String, Cluster> clusters = new ConcurrentHashMap<String, Cluster>();
+  private final ConcurrentHashMap<Long, Cluster> clustersById = new ConcurrentHashMap<Long, Cluster>();
+  private final ConcurrentHashMap<String, Host> hosts = new ConcurrentHashMap<String, Host>();
+  private final ConcurrentHashMap<Long, Host> hostsById = new ConcurrentHashMap<Long, Host>();
+  private final ConcurrentHashMap<String, Set<Cluster>> hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
+  private final ConcurrentHashMap<String, Set<Host>> clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
 
   @Inject
   private ClusterDAO clusterDAO;
@@ -168,33 +161,36 @@ public class ClustersImpl implements Clusters {
   private AmbariEventPublisher eventPublisher;
 
   @Inject
-  public ClustersImpl() {
-    clusters = new ConcurrentHashMap<String, Cluster>();
-    clustersById = new ConcurrentHashMap<Long, Cluster>();
-    hosts = new ConcurrentHashMap<String, Host>();
-    hostsById = new ConcurrentHashMap<Long, Host>();
-    hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
-    clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
-
-    LOG.info("Initializing the ClustersImpl");
-  }
+  public ClustersImpl(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+      HostFactory hostFactory) {
 
-  private void checkLoaded() {
-    if (!clustersLoaded) {
-      w.lock();
-      try {
-        if (!clustersLoaded) {
-          loadClustersAndHosts();
-        }
-        clustersLoaded = true;
-      } finally {
-        w.unlock();
-      }
-    }
+    this.clusterDAO = clusterDAO;
+    this.clusterFactory = clusterFactory;
+    this.hostDAO = hostDAO;
+    this.hostFactory = hostFactory;
   }
 
+  /**
+   * Initializes all of the in-memory state collections that this class
+   * unfortunately uses. It's annotated with {@link Inject} as a way to define a
+   * very simple lifecycle with Guice where the constructor is invoked
+   * (allowing injected members) followed by this method, which initializes the
+   * state of the instance.
+   * <p/>
+   * Because some of these stateful initializations may actually reference this
+   * {@link Clusters} instance, we must do this after the object has been
+   * instantiated and injected.
+   */
+  @Inject
   @Transactional
-  void loadClustersAndHosts() {
+  private void loadClustersAndHosts() {
+    List<HostEntity> hostEntities = hostDAO.findAll();
+    for (HostEntity hostEntity : hostEntities) {
+      Host host = hostFactory.create(hostEntity);
+      hosts.put(hostEntity.getHostName(), host);
+      hostsById.put(hostEntity.getHostId(), host);
+    }
+
     for (ClusterEntity clusterEntity : clusterDAO.findAll()) {
       Cluster currentCluster = clusterFactory.create(clusterEntity);
       clusters.put(clusterEntity.getClusterName(), currentCluster);
@@ -202,13 +198,11 @@ public class ClustersImpl implements Clusters {
       clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
     }
 
-    for (HostEntity hostEntity : hostDAO.findAll()) {
-      Host host = hostFactory.create(hostEntity, true);
-      hosts.put(hostEntity.getHostName(), host);
-      hostsById.put(hostEntity.getHostId(), host);
+    for (HostEntity hostEntity : hostEntities) {
       Set<Cluster> cSet = Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>());
       hostClusterMap.put(hostEntity.getHostName(), cSet);
 
+      Host host = hosts.get(hostEntity.getHostName());
       for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
         clusterHostMap.get(clusterEntity.getClusterName()).add(host);
         cSet.add(clusters.get(clusterEntity.getClusterName()));
@@ -225,66 +219,56 @@ public class ClustersImpl implements Clusters {
   @Override
   public void addCluster(String clusterName, StackId stackId, SecurityType securityType)
       throws AmbariException {
-    checkLoaded();
-
     Cluster cluster = null;
 
-    w.lock();
-    try {
-      if (clusters.containsKey(clusterName)) {
-        throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
-            + ", clusterName=" + clusterName);
-      }
+    if (clusters.containsKey(clusterName)) {
+      throw new DuplicateResourceException(
+          "Attempted to create a Cluster which already exists" + ", clusterName=" + clusterName);
+    }
 
-      // create an admin resource to represent this cluster
-      ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
-      if (resourceTypeEntity == null) {
-        resourceTypeEntity = new ResourceTypeEntity();
-        resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-        resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-        resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-      }
+    // create an admin resource to represent this cluster
+    ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
+    if (resourceTypeEntity == null) {
+      resourceTypeEntity = new ResourceTypeEntity();
+      resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
+      resourceTypeEntity.setName(ResourceType.CLUSTER.name());
+      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
+    }
 
-      ResourceEntity resourceEntity = new ResourceEntity();
-      resourceEntity.setResourceType(resourceTypeEntity);
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setResourceType(resourceTypeEntity);
 
-      StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
-          stackId.getStackVersion());
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
 
-      // retrieve new cluster id
-      // add cluster id -> cluster mapping into clustersById
-      ClusterEntity clusterEntity = new ClusterEntity();
-      clusterEntity.setClusterName(clusterName);
-      clusterEntity.setDesiredStack(stackEntity);
-      clusterEntity.setResource(resourceEntity);
-      if (securityType != null) {
-        clusterEntity.setSecurityType(securityType);
-      }
-
-      try {
-        clusterDAO.create(clusterEntity);
-        clusterEntity = clusterDAO.merge(clusterEntity);
-      } catch (RollbackException e) {
-        LOG.warn("Unable to create cluster " + clusterName, e);
-        throw new AmbariException("Unable to create cluster " + clusterName, e);
-      }
+    // retrieve new cluster id
+    // add cluster id -> cluster mapping into clustersById
+    ClusterEntity clusterEntity = new ClusterEntity();
+    clusterEntity.setClusterName(clusterName);
+    clusterEntity.setDesiredStack(stackEntity);
+    clusterEntity.setResource(resourceEntity);
+    if (securityType != null) {
+      clusterEntity.setSecurityType(securityType);
+    }
 
-      cluster = clusterFactory.create(clusterEntity);
-      clusters.put(clusterName, cluster);
-      clustersById.put(cluster.getClusterId(), cluster);
-      clusterHostMap.put(clusterName, new HashSet<Host>());
-    } finally {
-      w.unlock();
+    try {
+      clusterDAO.create(clusterEntity);
+    } catch (RollbackException e) {
+      LOG.warn("Unable to create cluster " + clusterName, e);
+      throw new AmbariException("Unable to create cluster " + clusterName, e);
     }
 
+    cluster = clusterFactory.create(clusterEntity);
+    clusters.put(clusterName, cluster);
+    clustersById.put(cluster.getClusterId(), cluster);
+    clusterHostMap.put(clusterName,
+        Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+
     cluster.setCurrentStackVersion(stackId);
   }
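
The Collections.newSetFromMap(new ConcurrentHashMap<...>()) call above is the idiom this refactoring leans on wherever it needs a thread-safe set, since the JDK has no ConcurrentHashSet. A self-contained illustration (the hostname is hypothetical):

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentSetSketch {
  public static void main(String[] args) {
    // A Set view backed by a ConcurrentHashMap gives thread-safe
    // add/remove/contains without the read/write locks this class
    // used to take around every access.
    Set<String> hostsOfCluster =
        Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    hostsOfCluster.add("c6401.ambari.apache.org");
    System.out.println(hostsOfCluster.contains("c6401.ambari.apache.org"));
  }
}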
 
   @Override
   public Cluster getCluster(String clusterName)
       throws AmbariException {
-    checkLoaded();
-
     Cluster cluster = null;
     if (clusterName != null) {
       cluster = clusters.get(clusterName);
@@ -299,8 +283,6 @@ public class ClustersImpl implements Clusters {
   @Override
   public Cluster getCluster(Long clusterId)
     throws AmbariException {
-    checkLoaded();
-
     Cluster cluster = null;
     if (clusterId != null) {
       cluster = clustersById.get(clusterId);
@@ -314,8 +296,6 @@ public class ClustersImpl implements Clusters {
 
   @Override
   public Cluster getClusterById(long id) throws AmbariException {
-    checkLoaded();
-
     Cluster cluster = clustersById.get(id);
     if (null == cluster) {
       throw new ClusterNotFoundException("clusterID=" + id);
@@ -327,6 +307,7 @@ public class ClustersImpl implements Clusters {
   @Override
   public void setCurrentStackVersion(String clusterName, StackId stackId)
       throws AmbariException{
+
     if(stackId == null || clusterName == null || clusterName.isEmpty()){
       LOG.warn("Unable to set version for cluster " + clusterName);
       throw new AmbariException("Unable to set"
@@ -334,19 +315,9 @@ public class ClustersImpl implements Clusters {
           + " for cluster " + clusterName);
     }
 
-    checkLoaded();
-
-    Cluster cluster = null;
-
-    r.lock();
-    try {
-      if (!clusters.containsKey(clusterName)) {
-        throw new ClusterNotFoundException(clusterName);
-      }
-
-      cluster = clusters.get(clusterName);
-    } finally {
-      r.unlock();
+    Cluster cluster = clusters.get(clusterName);
+    if (null == cluster) {
+      throw new ClusterNotFoundException(clusterName);
     }
 
     cluster.setCurrentStackVersion(stackId);
@@ -354,15 +325,12 @@ public class ClustersImpl implements Clusters {
 
   @Override
   public List<Host> getHosts() {
-    checkLoaded();
-
     return new ArrayList<Host>(hosts.values());
   }
 
   @Override
   public Set<Cluster> getClustersForHost(String hostname)
       throws AmbariException {
-    checkLoaded();
     Set<Cluster> clusters = hostClusterMap.get(hostname);
     if(clusters == null){
       throw new HostNotFoundException(hostname);
@@ -378,19 +346,16 @@ public class ClustersImpl implements Clusters {
 
   @Override
   public Host getHost(String hostname) throws AmbariException {
-    checkLoaded();
-
-    if (!hosts.containsKey(hostname)) {
+    Host host = hosts.get(hostname);
+    if (null == host) {
       throw new HostNotFoundException(hostname);
     }
 
-    return hosts.get(hostname);
+    return host;
   }
 
   @Override
   public boolean hostExists(String hostname){
-    checkLoaded();
-
     return hosts.containsKey(hostname);
   }
 
@@ -399,8 +364,6 @@ public class ClustersImpl implements Clusters {
    */
   @Override
   public boolean isHostMappedToCluster(String clusterName, String hostName) {
-    checkLoaded();
-
     Set<Cluster> clusters = hostClusterMap.get(hostName);
     for (Cluster cluster : clusters) {
       if (clusterName.equals(cluster.getClusterName())) {
@@ -413,8 +376,6 @@ public class ClustersImpl implements Clusters {
 
   @Override
   public Host getHostById(Long hostId) throws AmbariException {
-    checkLoaded();
-
     if (!hostsById.containsKey(hostId)) {
       throw new HostNotFoundException("Host Id = " + hostId);
     }
@@ -442,40 +403,32 @@ public class ClustersImpl implements Clusters {
    */
   @Override
   public void addHost(String hostname) throws AmbariException {
-    checkLoaded();
-
     if (hosts.containsKey(hostname)) {
       throw new AmbariException(MessageFormat.format("Duplicate entry for Host {0}", hostname));
     }
 
-    w.lock();
+    HostEntity hostEntity = new HostEntity();
+    hostEntity.setHostName(hostname);
+    hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
 
-    try {
-      HostEntity hostEntity = new HostEntity();
-      hostEntity.setHostName(hostname);
-      hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
-
-      //not stored to DB
-      Host host = hostFactory.create(hostEntity, false);
-      host.setAgentVersion(new AgentVersion(""));
-      List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
-      host.setDisksInfo(emptyDiskList);
-      host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
-      host.setHostAttributes(new HashMap<String, String>());
-      host.setState(HostState.INIT);
+    // not stored to DB
+    Host host = hostFactory.create(hostEntity);
+    host.setAgentVersion(new AgentVersion(""));
+    List<DiskInfo> emptyDiskList = new CopyOnWriteArrayList<DiskInfo>();
+    host.setDisksInfo(emptyDiskList);
+    host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
+    host.setHostAttributes(new ConcurrentHashMap<String, String>());
+    host.setState(HostState.INIT);
 
-      // the hosts by ID map is updated separately since the host has not yet
-      // been persisted yet - the below event is what causes the persist
-      hosts.put(hostname, host);
+    // the hosts by ID map is updated separately since the host has not yet
+    // been persisted - the event published below is what causes the persist
+    hosts.put(hostname, host);
 
-      hostClusterMap.put(hostname, Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
+    hostClusterMap.put(hostname,
+        Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a host to Clusters"
-            + ", hostname=" + hostname);
-      }
-    } finally {
-      w.unlock();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding a host to Clusters" + ", hostname=" + hostname);
     }
 
     // publish the event
@@ -494,42 +447,35 @@ public class ClustersImpl implements Clusters {
   public void updateHostWithClusterAndAttributes(
       Map<String, Set<String>> hostClusters,
       Map<String, Map<String, String>> hostAttributes) throws AmbariException {
-    checkLoaded();
-    w.lock();
 
-    try {
-      if (hostClusters != null) {
-        Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
-        Set<String> clusterNames = new HashSet<String>();
-        for (Set<String> cSet : hostClusters.values()) {
-          clusterNames.addAll(cSet);
-        }
+    if (null == hostClusters || hostClusters.isEmpty()) {
+      return;
+    }
 
-        for (String hostname : hostClusters.keySet()) {
-          Host host = hostMap.get(hostname);
-          Map<String, String>  attributes = hostAttributes.get(hostname);
-          if (attributes != null && !attributes.isEmpty()){
-            host.setHostAttributes(attributes);
-          }
+    Map<String, Host> hostMap = getHostsMap(hostClusters.keySet());
+    Set<String> clusterNames = new HashSet<String>();
+    for (Set<String> cSet : hostClusters.values()) {
+      clusterNames.addAll(cSet);
+    }
 
-          host.refresh();
+    for (String hostname : hostClusters.keySet()) {
+      Host host = hostMap.get(hostname);
+      Map<String, String> attributes = hostAttributes.get(hostname);
+      if (attributes != null && !attributes.isEmpty()) {
+        host.setHostAttributes(attributes);
+      }
 
-          Set<String> hostClusterNames = hostClusters.get(hostname);
-          for (String clusterName : hostClusterNames) {
-            if (clusterName != null && !clusterName.isEmpty()) {
-              mapHostToCluster(hostname, clusterName);
-            }
-          }
+      Set<String> hostClusterNames = hostClusters.get(hostname);
+      for (String clusterName : hostClusterNames) {
+        if (clusterName != null && !clusterName.isEmpty()) {
+          mapHostToCluster(hostname, clusterName);
         }
       }
-    } finally {
-      w.unlock();
     }
   }
 
   private Map<String, Host> getHostsMap(Collection<String> hostSet) throws
       HostNotFoundException {
-    checkLoaded();
 
     Map<String, Host> hostMap = new HashMap<String, Host>();
     Host host = null;
@@ -557,15 +503,9 @@ public class ClustersImpl implements Clusters {
    */
   @Override
   public void mapHostsToCluster(Set<String> hostnames, String clusterName) throws AmbariException {
-    checkLoaded();
-    w.lock();
-    try {
-      ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
-      for (String hostname : hostnames) {
-        mapHostToCluster(hostname, clusterName, clusterVersionEntity);
-      }
-    } finally {
-      w.unlock();
+    ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
+    for (String hostname : hostnames) {
+      mapHostToCluster(hostname, clusterName, clusterVersionEntity);
     }
   }
 
@@ -582,22 +522,15 @@ public class ClustersImpl implements Clusters {
     Host host = null;
     Cluster cluster = null;
 
-    checkLoaded();
+    host = getHost(hostname);
+    cluster = getCluster(clusterName);
 
-    r.lock();
-    try {
-      host = getHost(hostname);
-      cluster = getCluster(clusterName);
-
-      // check to ensure there are no duplicates
-      for (Cluster c : hostClusterMap.get(hostname)) {
-        if (c.getClusterName().equals(clusterName)) {
-          throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
-              clusterName + ", hostName=" + hostname);
-        }
+    // check to ensure there are no duplicates
+    for (Cluster c : hostClusterMap.get(hostname)) {
+      if (c.getClusterName().equals(clusterName)) {
+        throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
+            clusterName + ", hostName=" + hostname);
       }
-    } finally {
-      r.unlock();
     }
 
     if (!isOsSupportedByClusterStack(cluster, host)) {
@@ -615,17 +548,11 @@ public class ClustersImpl implements Clusters {
           clusterId);
     }
 
-    w.lock();
-    try {
-      mapHostClusterEntities(hostname, clusterId);
-      hostClusterMap.get(hostname).add(cluster);
-      clusterHostMap.get(clusterName).add(host);
-    } finally {
-      w.unlock();
-    }
+    mapHostClusterEntities(hostname, clusterId);
+    hostClusterMap.get(hostname).add(cluster);
+    clusterHostMap.get(clusterName).add(host);
 
     cluster.refresh();
-    host.refresh();
   }
 
   /**
@@ -638,8 +565,6 @@ public class ClustersImpl implements Clusters {
   @Override
   public void mapHostToCluster(String hostname, String clusterName)
       throws AmbariException {
-    checkLoaded();
-
     ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
     mapHostToCluster(hostname, clusterName, clusterVersionEntity);
   }
@@ -662,169 +587,120 @@ public class ClustersImpl implements Clusters {
 
   @Override
   public Map<String, Cluster> getClusters() {
-    checkLoaded();
-    r.lock();
-    try {
-      return Collections.unmodifiableMap(clusters);
-    } finally {
-      r.unlock();
-    }
+    return Collections.unmodifiableMap(clusters);
   }
 
   @Override
   public void updateClusterName(String oldName, String newName) {
-    w.lock();
-    try {
-      clusters.put(newName, clusters.remove(oldName));
-      clusterHostMap.put(newName, clusterHostMap.remove(oldName));
-    } finally {
-      w.unlock();
-    }
+    clusters.put(newName, clusters.remove(oldName));
+    clusterHostMap.put(newName, clusterHostMap.remove(oldName));
   }
 
 
   @Override
   public void debugDump(StringBuilder sb) {
-    r.lock();
-    try {
-      sb.append("Clusters=[ ");
-      boolean first = true;
-      for (Cluster c : clusters.values()) {
-        if (!first) {
-          sb.append(" , ");
-        }
-        first = false;
-        sb.append("\n  ");
-        c.debugDump(sb);
-        sb.append(" ");
+    sb.append("Clusters=[ ");
+    boolean first = true;
+    for (Cluster c : clusters.values()) {
+      if (!first) {
+        sb.append(" , ");
       }
-      sb.append(" ]");
-    } finally {
-      r.unlock();
+      first = false;
+      sb.append("\n  ");
+      c.debugDump(sb);
+      sb.append(" ");
     }
+    sb.append(" ]");
   }
 
   @Override
   public Map<String, Host> getHostsForCluster(String clusterName)
       throws AmbariException {
 
-    checkLoaded();
-    r.lock();
-
-    try {
-      Map<String, Host> hosts = new HashMap<String, Host>();
-
-      for (Host h : clusterHostMap.get(clusterName)) {
-        hosts.put(h.getHostName(), h);
-      }
-
-      return hosts;
-    } finally {
-      r.unlock();
+    Map<String, Host> hosts = new HashMap<String, Host>();
+    for (Host h : clusterHostMap.get(clusterName)) {
+      hosts.put(h.getHostName(), h);
     }
+
+    return hosts;
   }
 
   @Override
   public Map<Long, Host> getHostIdsForCluster(String clusterName)
       throws AmbariException {
+    Map<Long, Host> hosts = new HashMap<Long, Host>();
 
-    checkLoaded();
-    r.lock();
-
-    try {
-      Map<Long, Host> hosts = new HashMap<Long, Host>();
-
-      for (Host h : clusterHostMap.get(clusterName)) {
-        HostEntity hostEntity = hostDAO.findByName(h.getHostName());
-        hosts.put(hostEntity.getHostId(), h);
-      }
-
-      return hosts;
-    } finally {
-      r.unlock();
+    for (Host h : clusterHostMap.get(clusterName)) {
+      HostEntity hostEntity = hostDAO.findByName(h.getHostName());
+      hosts.put(hostEntity.getHostId(), h);
     }
+
+    return hosts;
   }
 
   @Override
   public void deleteCluster(String clusterName)
       throws AmbariException {
-    checkLoaded();
-    w.lock();
-    try {
-      Cluster cluster = getCluster(clusterName);
-      if (!cluster.canBeRemoved()) {
-        throw new AmbariException("Could not delete cluster"
-            + ", clusterName=" + clusterName);
-      }
-      LOG.info("Deleting cluster " + cluster.getClusterName());
-      cluster.delete();
+    Cluster cluster = getCluster(clusterName);
+    if (!cluster.canBeRemoved()) {
+      throw new AmbariException("Could not delete cluster" + ", clusterName=" + clusterName);
+    }
 
-      //clear maps
-      for (Set<Cluster> clusterSet : hostClusterMap.values()) {
-        clusterSet.remove(cluster);
-      }
-      clusterHostMap.remove(cluster.getClusterName());
+    LOG.info("Deleting cluster " + cluster.getClusterName());
+    cluster.delete();
 
-      Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
-      for (ClusterVersionEntity clusterVersion : clusterVersions) {
-        clusterVersionDAO.remove(clusterVersion);
-      }
+    // clear maps
+    for (Set<Cluster> clusterSet : hostClusterMap.values()) {
+      clusterSet.remove(cluster);
+    }
+    clusterHostMap.remove(cluster.getClusterName());
 
-      clusters.remove(clusterName);
-    } finally {
-      w.unlock();
+    Collection<ClusterVersionEntity> clusterVersions = cluster.getAllClusterVersions();
+    for (ClusterVersionEntity clusterVersion : clusterVersions) {
+      clusterVersionDAO.remove(clusterVersion);
     }
+
+    clusters.remove(clusterName);
   }
 
   @Override
   public void unmapHostFromCluster(String hostname, String clusterName) throws AmbariException {
     final Cluster cluster = getCluster(clusterName);
-    unmapHostFromClusters(hostname, Sets.newHashSet(cluster));
+    Host host = getHost(hostname);
+
+    unmapHostFromClusters(host, Sets.newHashSet(cluster));
+
+    cluster.refresh();
   }
 
   @Transactional
-  void unmapHostFromClusters(String hostname, Set<Cluster> clusters) throws AmbariException {
-    Host host = null;
+  void unmapHostFromClusters(Host host, Set<Cluster> clusters) throws AmbariException {
     HostEntity hostEntity = null;
 
-    checkLoaded();
     if (clusters.isEmpty()) {
       return;
     }
 
-    r.lock();
-    try {
-      host = getHost(hostname);
-      hostEntity = hostDAO.findByName(hostname);
-    } finally {
-      r.unlock();
-    }
-
-    w.lock();
-    try {
-      for (Cluster cluster : clusters) {
-        long clusterId = cluster.getClusterId();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Unmapping host {} from cluster {} (id={})", hostname,
-              cluster.getClusterName(), clusterId);
-        }
-
-        unmapHostClusterEntities(hostname, cluster.getClusterId());
-
-        hostClusterMap.get(hostname).remove(cluster);
-        clusterHostMap.get(cluster.getClusterName()).remove(host);
+    String hostname = host.getHostName();
+    hostEntity = hostDAO.findByName(hostname);
 
-        host.refresh();
-        cluster.refresh();
+    for (Cluster cluster : clusters) {
+      long clusterId = cluster.getClusterId();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unmapping host {} from cluster {} (id={})", hostname, cluster.getClusterName(),
+            clusterId);
       }
 
-      deleteConfigGroupHostMapping(hostEntity.getHostId());
+      unmapHostClusterEntities(hostname, cluster.getClusterId());
 
-      // Remove mapping of principals to the unmapped host
-      kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
-    } finally {
-      w.unlock();
+      hostClusterMap.get(hostname).remove(cluster);
+      clusterHostMap.get(cluster.getClusterName()).remove(host);
     }
+
+    deleteConfigGroupHostMapping(hostEntity.getHostId());
+
+    // Remove mapping of principals to the unmapped host
+    kerberosPrincipalHostDAO.removeByHost(hostEntity.getHostId());
   }
 
   @Transactional
@@ -890,88 +766,81 @@ public class ClustersImpl implements Clusters {
    */
   @Transactional
   void deleteHostEntityRelationships(String hostname) throws AmbariException {
-    checkLoaded();
-
     if (!hosts.containsKey(hostname)) {
       throw new HostNotFoundException("Could not find host " + hostname);
     }
 
-    w.lock();
-
-    try {
-      HostEntity entity = hostDAO.findByName(hostname);
-
-      if (entity == null) {
-        return;
-      }
-      // Remove from all clusters in the cluster_host_mapping table.
-      // This will also remove from kerberos_principal_hosts, hostconfigmapping, and configgrouphostmapping
-      Set<Cluster> clusters = hostClusterMap.get(hostname);
-      Set<Long> clusterIds = Sets.newHashSet();
-      for (Cluster cluster: clusters) {
-        clusterIds.add(cluster.getClusterId());
-      }
-
+    HostEntity entity = hostDAO.findByName(hostname);
 
+    if (entity == null) {
+      return;
+    }
 
+    // Remove from all clusters in the cluster_host_mapping table.
+    // This will also remove from kerberos_principal_hosts, hostconfigmapping,
+    // and configgrouphostmapping
+    Set<Cluster> clusters = hostClusterMap.get(hostname);
+    Set<Long> clusterIds = Sets.newHashSet();
+    for (Cluster cluster : clusters) {
+      clusterIds.add(cluster.getClusterId());
+    }
 
-      unmapHostFromClusters(hostname, clusters);
-      hostDAO.refresh(entity);
+    Host host = hosts.get(hostname);
+    unmapHostFromClusters(host, clusters);
+    hostDAO.refresh(entity);
 
-      hostVersionDAO.removeByHostName(hostname);
+    hostVersionDAO.removeByHostName(hostname);
 
-      // Remove blueprint tasks before hostRoleCommands
-      // TopologyLogicalTask owns the OneToOne relationship but Cascade is on HostRoleCommandEntity
-      if (entity.getHostRoleCommandEntities() != null) {
-        for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
-          TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
-          if (topologyLogicalTaskEnity != null) {
-            topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
-            hrcEntity.setTopologyLogicalTaskEntity(null);
-          }
+    // Remove blueprint tasks before hostRoleCommands
+    // TopologyLogicalTask owns the OneToOne relationship but Cascade is on
+    // HostRoleCommandEntity
+    if (entity.getHostRoleCommandEntities() != null) {
+      for (HostRoleCommandEntity hrcEntity : entity.getHostRoleCommandEntities()) {
+        TopologyLogicalTaskEntity topologyLogicalTaskEnity = hrcEntity.getTopologyLogicalTaskEntity();
+        if (topologyLogicalTaskEnity != null) {
+          topologyLogicalTaskDAO.remove(topologyLogicalTaskEnity);
+          hrcEntity.setTopologyLogicalTaskEntity(null);
         }
       }
-      for (Long clusterId: clusterIds) {
-        for (TopologyRequestEntity topologyRequestEntity: topologyRequestDAO.findByClusterId(clusterId)) {
-          TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
-
-          for (TopologyHostRequestEntity topologyHostRequestEntity: topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
-            if (hostname.equals(topologyHostRequestEntity.getHostName())) {
-              topologyHostRequestDAO.remove(topologyHostRequestEntity);
-            }
+    }
+
+    for (Long clusterId : clusterIds) {
+      for (TopologyRequestEntity topologyRequestEntity : topologyRequestDAO.findByClusterId(
+          clusterId)) {
+        TopologyLogicalRequestEntity topologyLogicalRequestEntity = topologyRequestEntity.getTopologyLogicalRequestEntity();
+
+        for (TopologyHostRequestEntity topologyHostRequestEntity : topologyLogicalRequestEntity.getTopologyHostRequestEntities()) {
+          if (hostname.equals(topologyHostRequestEntity.getHostName())) {
+            topologyHostRequestDAO.remove(topologyHostRequestEntity);
           }
         }
       }
+    }
 
 
-      entity.setHostRoleCommandEntities(null);
-      hostRoleCommandDAO.removeByHostId(entity.getHostId());
+    entity.setHostRoleCommandEntities(null);
+    hostRoleCommandDAO.removeByHostId(entity.getHostId());
 
-      entity.setHostStateEntity(null);
-      hostStateDAO.removeByHostId(entity.getHostId());
-      hostConfigMappingDAO.removeByHostId(entity.getHostId());
-      serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
-      requestOperationLevelDAO.removeByHostId(entity.getHostId());
-      topologyHostInfoDAO.removeByHost(entity);
+    entity.setHostStateEntity(null);
+    hostStateDAO.removeByHostId(entity.getHostId());
+    hostConfigMappingDAO.removeByHostId(entity.getHostId());
+    serviceConfigDAO.removeHostFromServiceConfigs(entity.getHostId());
+    requestOperationLevelDAO.removeByHostId(entity.getHostId());
+    topologyHostInfoDAO.removeByHost(entity);
 
-      // Remove from dictionaries
-      hosts.remove(hostname);
-      hostsById.remove(entity.getHostId());
+    // Remove from dictionaries
+    hosts.remove(hostname);
+    hostsById.remove(entity.getHostId());
 
-      hostDAO.remove(entity);
+    hostDAO.remove(entity);
 
-      // Note, if the host is still heartbeating, then new records will be re-inserted
-      // into the hosts and hoststate tables
-    } catch (Exception e) {
-      throw new AmbariException("Could not remove host", e);
-    } finally {
-      w.unlock();
-    }
+    // Note, if the host is still heartbeating, then new records will be
+    // re-inserted into the hosts and hoststate tables
   }
 
   @Override
   public boolean checkPermission(String clusterName, boolean readOnly) {
-
     Cluster cluster = findCluster(clusterName);
 
     return (cluster == null && readOnly) || checkPermission(cluster, readOnly);
@@ -999,19 +868,14 @@ public class ClustersImpl implements Clusters {
    */
   @Override
   public int getClusterSize(String clusterName) {
-    checkLoaded();
-    r.lock();
-
     int hostCount = 0;
 
-    if (clusterHostMap.containsKey(clusterName) && clusterHostMap.get(clusterName) != null) {
+    Set<Host> hosts = clusterHostMap.get(clusterName);
+    if (null != hosts) {
       hostCount = clusterHostMap.get(clusterName).size();
     }
 
-    r.unlock();
-
     return hostCount;
-
   }
 
   // ----- helper methods ---------------------------------------------------
@@ -1064,4 +928,16 @@ public class ClustersImpl implements Clusters {
     // TODO : should we log this?
     return false;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void invalidate(Cluster cluster) {
+    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+    Cluster currentCluster = clusterFactory.create(clusterEntity);
+    clusters.put(clusterEntity.getClusterName(), currentCluster);
+    clustersById.put(currentCluster.getClusterId(), currentCluster);
+    clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
+  }
 }
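
The javadoc on loadClustersAndHosts above leans on a small Guice lifecycle guarantee: after the constructor returns, Guice injects annotated fields and then invokes @Inject methods, private ones included, on the fully built object. A minimal sketch of that behavior (all names here are invented for illustration):

import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Singleton;

public class InjectMethodLifecycleSketch {
  static class Repository {
    String load() {
      return "state";
    }
  }

  @Singleton
  static class StatefulService {
    private final Repository repository;
    private String cache;

    @Inject
    StatefulService(Repository repository) {
      this.repository = repository; // 1) constructor injection runs first
    }

    @Inject
    private void initialize() {     // 2) Guice then calls @Inject methods,
      cache = repository.load();    //    even private ones, after construction
    }
  }

  public static void main(String[] args) {
    StatefulService service =
        Guice.createInjector().getInstance(StatefulService.class);
    System.out.println(service.cache); // prints "state"
  }
}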

+ 36 - 56
ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java

@@ -44,7 +44,6 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -80,8 +79,6 @@ public class ConfigGroupImpl implements ConfigGroup {
   private ClusterDAO clusterDAO;
   @Inject
   Clusters clusters;
-  @Inject
-  private ConfigFactory configFactory;
 
   @AssistedInject
   public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
@@ -317,23 +314,18 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public void persist() {
-    cluster.getClusterGlobalLock().writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        if (!isPersisted) {
-          persistEntities();
-          refresh();
-          cluster.refresh();
-          isPersisted = true;
-        } else {
-          saveIfPersisted();
-        }
-      } finally {
-        readWriteLock.writeLock().unlock();
+      if (!isPersisted) {
+        persistEntities();
+        refresh();
+        cluster.refresh();
+        isPersisted = true;
+      } else {
+        saveIfPersisted();
       }
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -465,20 +457,15 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public void delete() {
-    cluster.getClusterGlobalLock().writeLock().lock();
+    readWriteLock.writeLock().lock();
     try {
-      readWriteLock.writeLock().lock();
-      try {
-        configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-        configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
-        configGroupDAO.removeByPK(configGroupEntity.getGroupId());
-        cluster.refresh();
-        isPersisted = false;
-      } finally {
-        readWriteLock.writeLock().unlock();
-      }
+      configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+      configGroupHostMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
+      configGroupDAO.removeByPK(configGroupEntity.getGroupId());
+      cluster.refresh();
+      isPersisted = false;
     } finally {
-      cluster.getClusterGlobalLock().writeLock().unlock();
+      readWriteLock.writeLock().unlock();
     }
   }
 
@@ -526,40 +513,33 @@ public class ConfigGroupImpl implements ConfigGroup {
 
   @Override
   public ConfigGroupResponse convertToResponse() throws AmbariException {
-    cluster.getClusterGlobalLock().readLock().lock();
+    readWriteLock.readLock().lock();
     try {
-      readWriteLock.readLock().lock();
-      try {
-        Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
-        for (Host host : hosts.values()) {
-          Map<String, Object> hostMap = new HashMap<String, Object>();
-          hostMap.put("host_name", host.getHostName());
-          hostnames.add(hostMap);
-        }
+      Set<Map<String, Object>> hostnames = new HashSet<Map<String, Object>>();
+      for (Host host : hosts.values()) {
+        Map<String, Object> hostMap = new HashMap<String, Object>();
+        hostMap.put("host_name", host.getHostName());
+        hostnames.add(hostMap);
+      }
 
-        Set<Map<String, Object>> configObjMap = new HashSet<Map<String,
-          Object>>();
+      Set<Map<String, Object>> configObjMap = new HashSet<Map<String, Object>>();
 
-        for (Config config : configurations.values()) {
-          Map<String, Object> configMap = new HashMap<String, Object>();
-          configMap.put(ConfigurationResourceProvider
-            .CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, config.getType());
-          configMap.put(ConfigurationResourceProvider
-            .CONFIGURATION_CONFIG_TAG_PROPERTY_ID, config.getTag());
-          configObjMap.add(configMap);
-        }
+      for (Config config : configurations.values()) {
+        Map<String, Object> configMap = new HashMap<String, Object>();
+        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
+            config.getType());
+        configMap.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID,
+            config.getTag());
+        configObjMap.add(configMap);
+      }
 
-        ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
+      ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(
           configGroupEntity.getGroupId(), cluster.getClusterName(),
           configGroupEntity.getGroupName(), configGroupEntity.getTag(),
-          configGroupEntity.getDescription(),
-          hostnames, configObjMap);
-        return configGroupResponse;
-      } finally {
-        readWriteLock.readLock().unlock();
-      }
+          configGroupEntity.getDescription(), hostnames, configObjMap);
+      return configGroupResponse;
     } finally {
-      cluster.getClusterGlobalLock().readLock().unlock();
+      readWriteLock.readLock().unlock();
     }
   }
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java

@@ -21,5 +21,5 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.state.Host;
 
 public interface HostFactory {
-  Host create(HostEntity hostEntity, boolean persisted);
+  Host create(HostEntity hostEntity);
 }
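
The signature change above is to a Guice assisted-inject factory; dropping the boolean means HostImpl no longer distinguishes persisted from unpersisted construction. For context, a sketch of how such a factory is typically bound (this module body is an assumption for illustration; the actual binding lives elsewhere in Ambari):

import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.host.HostFactory;
import org.apache.ambari.server.state.host.HostImpl;

import com.google.inject.AbstractModule;
import com.google.inject.assistedinject.FactoryModuleBuilder;

public class HostFactoryModuleSketch extends AbstractModule {
  @Override
  protected void configure() {
    // Guice generates the HostFactory implementation at runtime; the
    // HostEntity argument of create() is handed to HostImpl's assisted
    // constructor alongside its normally injected dependencies.
    install(new FactoryModuleBuilder()
        .implement(Host.class, HostImpl.class)
        .build(HostFactory.class));
  }
}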

The file's diff data was not displayed because the file is too large.
+ 244 - 462
ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java


The file's diff data was not displayed because the file is too large.
+ 289 - 484
ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java


+ 0 - 1
ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java

@@ -742,7 +742,6 @@ public class TopologyManager {
 
     if (null != rackInfoFromTemplate) {
       host.setRackInfo(rackInfoFromTemplate);
-      host.persist(); //todo this is required only if host is not persisted to database yet, is it really so?
       try {
         // todo: do we need this in case of blueprints?
         ambariContext.getController().registerRackChange(ambariContext.getClusterName(topology.getClusterId()));

+ 11 - 8
ambari-server/src/main/java/org/apache/ambari/server/utils/RetryHelper.java

@@ -17,23 +17,25 @@
  */
 package org.apache.ambari.server.utils;
 
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.eclipse.persistence.exceptions.DatabaseException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.Callable;
-
 /**
  * Provides utility methods to support operations retry
  * TODO injection as Guice singleton, static for now to avoid major modifications
  */
 public class RetryHelper {
   protected final static Logger LOG = LoggerFactory.getLogger(RetryHelper.class);
+  private static Clusters s_clusters;
 
   private static ThreadLocal<Set<Cluster>> affectedClusters = new ThreadLocal<Set<Cluster>>(){
     @Override
@@ -44,7 +46,8 @@ public class RetryHelper {
 
   private static int operationsRetryAttempts = 0;
 
-  public static void init(int operationsRetryAttempts) {
+  public static void init(Clusters clusters, int operationsRetryAttempts) {
+    s_clusters = clusters;
     RetryHelper.operationsRetryAttempts = operationsRetryAttempts;
   }
 
@@ -82,7 +85,8 @@ public class RetryHelper {
 
   public static void invalidateAffectedClusters() {
     for (Cluster cluster : affectedClusters.get()) {
-      cluster.invalidateData();
+      s_clusters.invalidate(cluster);
     }
+
+    // clear after iterating; removing entries inside the loop would risk a
+    // ConcurrentModificationException from the backing set
+    affectedClusters.get().clear();
   }
 
@@ -90,7 +94,6 @@ public class RetryHelper {
     RetryHelper.clearAffectedClusters();
     int retryAttempts = RetryHelper.getOperationsRetryAttempts();
     do {
-
       try {
         return command.call();
       } catch (Exception e) {
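
RetryHelper now takes Clusters at init time so invalidation goes through the business layer instead of each Cluster instance. A usage sketch follows; the wrapper name executeWithRetry is an assumption read off the do/command.call() loop above, whose enclosing signature is cut off in this view:

import java.util.concurrent.Callable;

import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.utils.RetryHelper;

// Hypothetical caller: initialize once, then wrap a database-touching
// operation so transient failures are retried.
class RetryCallerSketch {
  static String doWork(Clusters clusters) throws Exception {
    RetryHelper.init(clusters, 3); // allow up to 3 retry attempts
    return RetryHelper.executeWithRetry(new Callable<String>() { // assumed name
      @Override
      public String call() throws Exception {
        return "result"; // a real caller would mutate cluster state here
      }
    });
  }
}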

+ 0 - 1
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java

@@ -103,7 +103,6 @@ public class ExecutionCommandWrapperTest {
 
     clusters = injector.getInstance(Clusters.class);
     clusters.addHost(HOST1);
-    clusters.getHost(HOST1).persist();
     clusters.addCluster(CLUSTER1, new StackId("HDP-0.1"));
 
     Cluster cluster1 = clusters.getCluster(CLUSTER1);
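
This is the first of many test hunks with the same shape: the explicit persist() after addHost() disappears because host creation now writes through to the database immediately. The new pattern, assuming the Clusters API used above:

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.Clusters;

// Sketch: addHost() persists the new HostEntity as part of creation, so the
// old clusters.getHost(name).persist() step is no longer needed.
class AddHostSketch {
  static void addHost(Clusters clusters, String hostName) throws AmbariException {
    clusters.addHost(hostName); // persisted here; no follow-up persist() call
  }
}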

+ 0 - 9
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java

@@ -112,10 +112,7 @@ public class TestActionDBAccessorImpl {
 
     // Add this host's name since it is needed for server-side actions.
     clusters.addHost(serverHostName);
-    clusters.getHost(serverHostName).persist();
-
     clusters.addHost(hostName);
-    clusters.getHost(hostName).persist();
 
     StackId stackId = new StackId("HDP-0.1");
     clusters.addCluster(clusterName, stackId);
@@ -273,7 +270,6 @@ public class TestActionDBAccessorImpl {
     for (int i = 0; i < 1000; i++) {
       String hostName = "c64-" + i;
       clusters.addHost(hostName);
-      clusters.getHost(hostName).persist();
     }
 
     // create 1 request, 3 stages per host, each with 2 commands
@@ -460,7 +456,6 @@ public class TestActionDBAccessorImpl {
     requestIds.add(requestId);
     populateActionDB(db, hostName, requestId, stageId);
     clusters.addHost("host2");
-    clusters.getHost("host2").persist();
     populateActionDB(db, hostName, requestId + 1, stageId);
     List<Long> requestIdsResult =
       db.getRequestsByStatus(null, BaseRequest.DEFAULT_PAGE_SIZE, false);
@@ -546,11 +541,8 @@ public class TestActionDBAccessorImpl {
     s.setStageId(stageId);
 
     clusters.addHost("host2");
-    clusters.getHost("host2").persist();
     clusters.addHost("host3");
-    clusters.getHost("host3").persist();
     clusters.addHost("host4");
-    clusters.getHost("host4").persist();
 
     s.addHostRoleExecutionCommand("host1", Role.HBASE_MASTER,
         RoleCommand.START,
@@ -680,7 +672,6 @@ public class TestActionDBAccessorImpl {
       String host = "host" + i;
 
       clusters.addHost(host);
-      clusters.getHost(host).persist();
 
       s.addHostRoleExecutionCommand("host" + i, Role.HBASE_MASTER,
         RoleCommand.START, null, "cluster1", "HBASE", false, false);

+ 3 - 5
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java

@@ -24,21 +24,19 @@ import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.audit.AuditLogger;
-import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.events.publishers.JPAEventPublisher;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
@@ -57,7 +55,8 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
-import static org.junit.Assert.assertNotNull;
+
+import junit.framework.Assert;
 
 public class TestActionManager {
 
@@ -79,7 +78,6 @@ public class TestActionManager {
     stageFactory = injector.getInstance(StageFactory.class);
 
     clusters.addHost(hostname);
-    clusters.getHost(hostname).persist();
     StackId stackId = new StackId("HDP-0.1");
     clusters.addCluster(clusterName, stackId);
     unitOfWork = injector.getInstance(UnitOfWork.class);

+ 8 - 4
ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java

@@ -18,10 +18,12 @@
 
 package org.apache.ambari.server.agent;
 
+import static org.easymock.EasyMock.createNiceMock;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import javax.persistence.EntityManager;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.ambari.server.RandomPortJerseyTest;
@@ -35,6 +37,7 @@ import org.apache.ambari.server.agent.rest.AgentResource;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.SecurityHelperImpl;
@@ -55,7 +58,6 @@ import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.ServiceImpl;
 import org.apache.ambari.server.state.cluster.ClusterFactory;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.configgroup.ConfigGroupImpl;
@@ -66,6 +68,7 @@ import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
+import org.apache.ambari.server.topology.PersistedState;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.codehaus.jettison.json.JSONException;
@@ -80,7 +83,6 @@ import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.assistedinject.FactoryModuleBuilder;
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
@@ -296,7 +298,6 @@ public class AgentResourceTest extends RandomPortJerseyTest {
         // The test will fail anyway
       }
       requestStaticInjection(AgentResource.class);
-      bind(Clusters.class).to(ClustersImpl.class);
       os_family = mock(OsFamily.class);
       actionManager = mock(ActionManager.class);
       ambariMetaInfo = mock(AmbariMetaInfo.class);
@@ -311,10 +312,13 @@ public class AgentResourceTest extends RandomPortJerseyTest {
       bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
       bind(DBAccessor.class).toInstance(mock(DBAccessor.class));
       bind(HostRoleCommandDAO.class).toInstance(mock(HostRoleCommandDAO.class));
+      bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
+      bind(HostDAO.class).toInstance(createNiceMock(HostDAO.class));
+      bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
+      bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class));
     }
 
     private void installDependencies() {
-      install(new AmbariJpaPersistModule("ambari-javadb"));
       install(new FactoryModuleBuilder().implement(
           Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
       install(new FactoryModuleBuilder().implement(

+ 59 - 73
ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java

@@ -59,7 +59,6 @@ import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.audit.AuditLogger;
 import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.HostDAO;
@@ -160,13 +159,12 @@ public class HeartbeatProcessorTest {
   public void testHeartbeatWithConfigs() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -230,9 +228,8 @@ public class HeartbeatProcessorTest {
   public void testRestartRequiredAfterInstallClient() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -296,13 +293,12 @@ public class HeartbeatProcessorTest {
   public void testHeartbeatCustomCommandWithConfigs() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -381,13 +377,12 @@ public class HeartbeatProcessorTest {
   public void testHeartbeatCustomStartStop() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -466,13 +461,12 @@ public class HeartbeatProcessorTest {
   public void testStatusHeartbeat() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -544,7 +538,6 @@ public class HeartbeatProcessorTest {
   public void testCommandReport() throws AmbariException {
     injector.injectMembers(this);
     clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).persist();
 
     StackId dummyStackId = new StackId(DummyStackId);
     clusters.addCluster(DummyCluster, dummyStackId);
@@ -594,12 +587,11 @@ public class HeartbeatProcessorTest {
   @Test
   @SuppressWarnings("unchecked")
   public void testCommandReportOnHeartbeatUpdatedState()
-      throws AmbariException, InvalidStateTransitionException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -714,12 +706,11 @@ public class HeartbeatProcessorTest {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testUpgradeSpecificHandling() throws AmbariException, InvalidStateTransitionException {
+  public void testUpgradeSpecificHandling() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -812,9 +803,8 @@ public class HeartbeatProcessorTest {
   public void testCommandStatusProcesses() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
 
     ActionQueue aq = new ActionQueue();
@@ -891,16 +881,15 @@ public class HeartbeatProcessorTest {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testComponentUpgradeCompleteReport() throws AmbariException, InvalidStateTransitionException {
+  public void testComponentUpgradeCompleteReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -976,16 +965,15 @@ public class HeartbeatProcessorTest {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testComponentUpgradeFailReport() throws AmbariException, InvalidStateTransitionException {
+  public void testComponentUpgradeFailReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -1097,16 +1085,15 @@ public class HeartbeatProcessorTest {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testComponentUpgradeInProgressReport() throws AmbariException, InvalidStateTransitionException {
+  public void testComponentUpgradeInProgressReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -1307,13 +1294,12 @@ public class HeartbeatProcessorTest {
   public void testComponentInProgressStatusSafeAfterStatusReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).
-        addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
+        addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
     hdfs.getServiceComponent(NAMENODE).
-        addServiceComponentHost(DummyHostname1).persist();
+        addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 

+ 13 - 3
ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java

@@ -24,6 +24,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
 
+import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -60,6 +61,7 @@ import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 
@@ -135,7 +137,7 @@ public class HeartbeatTestHelper {
   }
 
   public Cluster getDummyCluster()
-      throws AmbariException {
+      throws Exception {
     Map<String, String> configProperties = new HashMap<String, String>() {{
       put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
       put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -154,7 +156,7 @@ public class HeartbeatTestHelper {
 
   public Cluster getDummyCluster(String clusterName, String desiredStackId,
                                  Map<String, String> configProperties, Set<String> hostNames)
-      throws AmbariException {
+      throws Exception {
     StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
     org.junit.Assert.assertNotNull(stackEntity);
 
@@ -177,6 +179,14 @@ public class HeartbeatTestHelper {
 
     StackId stackId = new StackId(desiredStackId);
 
+    // because this test method goes around the Clusters business object, we
+    // forcefully refresh the internal state so that any tests which
+    // incorrectly use Clusters after calling this won't be affected
+    Clusters clusters = injector.getInstance(Clusters.class);
+    Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+    method.setAccessible(true);
+    method.invoke(clusters);
+
     Cluster cluster = clusters.getCluster(clusterName);
 
     cluster.setDesiredStackVersion(stackId);
@@ -203,12 +213,12 @@ public class HeartbeatTestHelper {
       clusters.addHost(hostName);
       Host host = clusters.getHost(hostName);
       host.setHostAttributes(hostAttributes);
-      host.persist();
 
       HostEntity hostEntity = hostDAO.findByName(hostName);
       Assert.assertNotNull(hostEntity);
       hostEntities.add(hostEntity);
     }
+
     clusterEntity.setHostEntities(hostEntities);
     clusters.mapHostsToCluster(hostNames, clusterName);
 

+ 63 - 73
ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java

@@ -160,7 +160,7 @@ public class TestHeartbeatHandler {
   }
 
   @After
-  public void teardown() throws AmbariException {
+  public void teardown() throws Exception {
     injector.getInstance(PersistService.class).stop();
     EasyMock.reset(auditLogger);
   }
@@ -221,10 +221,9 @@ public class TestHeartbeatHandler {
   public void testStatusHeartbeatWithAnnotation() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
 
     ActionQueue aq = new ActionQueue();
 
@@ -251,7 +250,7 @@ public class TestHeartbeatHandler {
     HeartBeatResponse resp = handler.handleHeartBeat(hb);
     Assert.assertFalse(resp.hasMappedComponents());
 
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
     serviceComponentHost1.setState(State.INIT);
@@ -273,13 +272,12 @@ public class TestHeartbeatHandler {
   public void testLiveStatusUpdateAfterStopFailed() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).
-            addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
+        addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
     hdfs.getServiceComponent(NAMENODE).
-            addServiceComponentHost(DummyHostname1).persist();
+        addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -345,7 +343,7 @@ public class TestHeartbeatHandler {
 
 
   @Test
-  public void testRegistration() throws AmbariException,
+  public void testRegistration() throws Exception,
       InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
@@ -376,7 +374,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testRegistrationRecoveryConfig() throws AmbariException,
+  public void testRegistrationRecoveryConfig() throws Exception,
       InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
@@ -385,18 +383,17 @@ public class TestHeartbeatHandler {
                                                     injector);
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
 
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
@@ -441,7 +438,7 @@ public class TestHeartbeatHandler {
   //
   @Test
   public void testRegistrationRecoveryConfigMaintenanceMode()
-          throws AmbariException, InvalidStateTransitionException {
+      throws Exception, InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
@@ -449,22 +446,21 @@ public class TestHeartbeatHandler {
             injector);
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     /**
      * Add three service components enabled for auto start.
      */
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
 
     hdfs.addServiceComponent(HDFS_CLIENT).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
@@ -495,7 +491,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testRegistrationAgentConfig() throws AmbariException,
+  public void testRegistrationAgentConfig() throws Exception,
       InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
@@ -527,7 +523,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testRegistrationWithBadVersion() throws AmbariException,
+  public void testRegistrationWithBadVersion() throws Exception,
       InvalidStateTransitionException {
 
     ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -570,7 +566,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
+  public void testRegistrationPublicHostname() throws Exception, InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
@@ -602,7 +598,7 @@ public class TestHeartbeatHandler {
 
 
   @Test
-  public void testInvalidOSRegistration() throws AmbariException,
+  public void testInvalidOSRegistration() throws Exception,
       InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
@@ -630,7 +626,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testIncompatibleAgentRegistration() throws AmbariException,
+  public void testIncompatibleAgentRegistration() throws Exception,
           InvalidStateTransitionException {
 
     ActionManager am = actionManagerTestHelper.getMockActionManager();
@@ -660,7 +656,7 @@ public class TestHeartbeatHandler {
 
   @Test
   public void testRegisterNewNode()
-      throws AmbariException, InvalidStateTransitionException {
+      throws Exception, InvalidStateTransitionException {
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     replay(am);
     Clusters fsm = clusters;
@@ -745,7 +741,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testStateCommandsAtRegistration() throws AmbariException, InvalidStateTransitionException {
+  public void testStateCommandsAtRegistration() throws Exception, InvalidStateTransitionException {
     List<StatusCommand> dummyCmds = new ArrayList<StatusCommand>();
     StatusCommand statusCmd1 = new StatusCommand();
     statusCmd1.setClusterName(DummyCluster);
@@ -781,16 +777,15 @@ public class TestHeartbeatHandler {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
+  public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -839,16 +834,15 @@ public class TestHeartbeatHandler {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testOPFailedEventForAbortedTask() throws AmbariException, InvalidStateTransitionException {
+  public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(SECONDARY_NAMENODE);
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1);
 
     ActionQueue aq = new ActionQueue();
 
@@ -917,13 +911,12 @@ public class TestHeartbeatHandler {
   public void testStatusHeartbeatWithVersion() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(HDFS_CLIENT).persist();
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(HDFS_CLIENT);
+    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
     ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
@@ -995,11 +988,10 @@ public class TestHeartbeatHandler {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
 
@@ -1077,11 +1069,10 @@ public class TestHeartbeatHandler {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
+    hdfs.addServiceComponent(NAMENODE);
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
 
@@ -1281,7 +1272,7 @@ public class TestHeartbeatHandler {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testIgnoreCustomActionReport() throws AmbariException, InvalidStateTransitionException {
+  public void testIgnoreCustomActionReport() throws Exception, InvalidStateTransitionException {
     CommandReport cr1 = new CommandReport();
     cr1.setActionId(StageUtils.getActionId(requestId, stageId));
     cr1.setTaskId(1);
@@ -1343,7 +1334,7 @@ public class TestHeartbeatHandler {
   }
 
   @Test
-  public void testComponents() throws AmbariException,
+  public void testComponents() throws Exception,
       InvalidStateTransitionException {
     ComponentsResponse expected = new ComponentsResponse();
     StackId dummyStackId = new StackId(DummyStackId);
@@ -1412,9 +1403,8 @@ public class TestHeartbeatHandler {
   public void testCommandStatusProcesses_empty() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(DATANODE);
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
 
     ActionQueue aq = new ActionQueue();

+ 37 - 50
ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java

@@ -146,10 +146,8 @@ public class TestHeartbeatMonitor {
     Clusters clusters = injector.getInstance(Clusters.class);
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
-    clusters.getHost(hostname1).persist();
     clusters.addHost(hostname2);
     setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
-    clusters.getHost(hostname2).persist();
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
@@ -170,13 +168,12 @@ public class TestHeartbeatMonitor {
 
     clusters.mapHostsToCluster(hostNames, clusterName);
     Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+    hdfs.addServiceComponent(Role.DATANODE.name());
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
+    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
 
     hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -232,10 +229,8 @@ public class TestHeartbeatMonitor {
     Clusters clusters = injector.getInstance(Clusters.class);
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
-    clusters.getHost(hostname1).persist();
     clusters.addHost(hostname2);
     setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
-    clusters.getHost(hostname2).persist();
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
 
@@ -265,21 +260,20 @@ public class TestHeartbeatMonitor {
 
     clusters.mapHostsToCluster(hostNames, clusterName);
     Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
-      (hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+    (hostname1);
+    hdfs.addServiceComponent(Role.NAMENODE.name());
     hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost
-      (hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
+    (hostname1);
+    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
     hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).
-      addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
+        addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
-      (hostname1).persist();
+    (hostname1);
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost
-      (hostname2).persist();
+    (hostname2);
 
     hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -360,7 +354,6 @@ public class TestHeartbeatMonitor {
     Clusters clusters = injector.getInstance(Clusters.class);
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "5.9");
-    clusters.getHost(hostname1).persist();
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
 
@@ -375,13 +368,12 @@ public class TestHeartbeatMonitor {
     clusters.mapHostsToCluster(hostNames, clusterName);
 
     Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+    hdfs.addServiceComponent(Role.DATANODE.name());
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
+    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
 
     hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
@@ -416,12 +408,12 @@ public class TestHeartbeatMonitor {
     hm.start();
     Thread.sleep(3 * heartbeatMonitorWakeupIntervalMS);
     hm.shutdown();
-    
+
     int tryNumber = 0;
     while(hm.isAlive()) {
       hm.join(2*heartbeatMonitorWakeupIntervalMS);
       tryNumber++;
-      
+
       if(tryNumber >= 5) {
         fail("HeartbeatMonitor should be already stopped");
       }
@@ -443,7 +435,6 @@ public class TestHeartbeatMonitor {
     Clusters clusters = injector.getInstance(Clusters.class);
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
-    clusters.getHost(hostname1).persist();
 
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
@@ -459,14 +450,13 @@ public class TestHeartbeatMonitor {
     clusters.mapHostsToCluster(hostNames, clusterName);
 
     Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
+    hdfs.addServiceComponent(Role.DATANODE.name());
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
+    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(hostname1);
 
     ActionQueue aq = new ActionQueue();
@@ -559,11 +549,9 @@ public class TestHeartbeatMonitor {
 
     clusters.addHost(hostname1);
     setOsFamily(clusters.getHost(hostname1), "redhat", "6.3");
-    clusters.getHost(hostname1).persist();
 
     clusters.addHost(hostname2);
     setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
-    clusters.getHost(hostname2).persist();
     clusters.addCluster(clusterName, stackId);
 
     Cluster cluster = clusters.getCluster(clusterName);
@@ -581,14 +569,13 @@ public class TestHeartbeatMonitor {
     clusters.mapHostsToCluster(hostNames, clusterName);
 
     Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
+
+    hdfs.addServiceComponent(Role.DATANODE.name());
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1);
+    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name());
+    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1);
 
     hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);
     hdfs.getServiceComponent(Role.NAMENODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED);

+ 3 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -87,6 +87,7 @@ import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptorFactory;
 import org.apache.ambari.server.state.stack.Metric;
 import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.Capture;
@@ -1839,6 +1840,8 @@ public class AmbariMetaInfoTest {
     Injector injector = Guice.createInjector(Modules.override(
         new InMemoryDefaultTestModule()).with(new MockModule()));
 
+    EventBusSynchronizer.synchronizeAmbariEventPublisher(injector);
+
     injector.getInstance(GuiceJpaInitializer.class);
     injector.getInstance(EntityManager.class);
     long clusterId = injector.getInstance(OrmTestHelper.class).createCluster(
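
EventBusSynchronizer swaps Ambari's asynchronous event publisher for a synchronous one so listener side effects are visible as soon as an event is posted. In plain Guava terms this is the difference between EventBus and AsyncEventBus; a minimal, self-contained illustration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.eventbus.AsyncEventBus;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;

// Illustration only: a plain EventBus dispatches on the posting thread, which
// is what deterministic tests want; AsyncEventBus hands events to an executor.
class BusDemo {
  static class Listener {
    @Subscribe
    public void onEvent(String event) {
      System.out.println("saw " + event);
    }
  }

  public static void main(String[] args) {
    EventBus syncBus = new EventBus("sync");
    syncBus.register(new Listener());
    syncBus.post("event-1"); // listener runs before post() returns

    ExecutorService executor = Executors.newSingleThreadExecutor();
    AsyncEventBus asyncBus = new AsyncEventBus(executor);
    asyncBus.register(new Listener());
    asyncBus.post("event-2"); // listener runs later, on the executor thread
    executor.shutdown(); // let the queued event drain, then exit
  }
}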

+ 33 - 10
ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java

@@ -18,20 +18,27 @@
 
 package org.apache.ambari.server.api.services;
 
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
+import static org.junit.Assert.assertEquals;
 
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.cluster.ClusterFactory;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
+import org.apache.ambari.server.state.host.HostFactory;
+import org.easymock.EasyMock;
 
 /**
  * Unit tests for ClusterService.
@@ -42,7 +49,17 @@ public class ClusterServiceTest extends BaseServiceTest {
   @Override
   public List<ServiceTestInvocation> getTestInvocations() throws Exception {
     List<ServiceTestInvocation> listInvocations = new ArrayList<ServiceTestInvocation>();
-    Clusters clusters = new TestClusters();
+
+    ClusterDAO clusterDAO = EasyMock.createNiceMock(ClusterDAO.class);
+    HostDAO hostDAO = EasyMock.createNiceMock(HostDAO.class);
+
+    EasyMock.expect(clusterDAO.findAll()).andReturn(new ArrayList<ClusterEntity>()).atLeastOnce();
+    EasyMock.expect(hostDAO.findAll()).andReturn(new ArrayList<HostEntity>()).atLeastOnce();
+
+    EasyMock.replay(clusterDAO, hostDAO);
+
+    Clusters clusters = new TestClusters(clusterDAO, EasyMock.createNiceMock(ClusterFactory.class),
+        hostDAO, EasyMock.createNiceMock(HostFactory.class));
 
     ClusterService clusterService;
     Method m;
@@ -161,6 +178,12 @@ public class ClusterServiceTest extends BaseServiceTest {
   }
 
   private class TestClusters extends ClustersImpl {
+    public TestClusters(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
+        HostFactory hostFactory) {
+
+      super(clusterDAO, clusterFactory, hostDAO, hostFactory);
+    }
+
     @Override
     public boolean checkPermission(String clusterName, boolean readOnly) {
       return true;

+ 1 - 4
ambari-server/src/test/java/org/apache/ambari/server/checks/InstallPackagesCheckTest.java

@@ -18,8 +18,8 @@
 package org.apache.ambari.server.checks;
 
 import java.util.ArrayList;
-import java.util.List;
 import java.util.Collections;
+import java.util.List;
 
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -44,8 +44,6 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 import org.powermock.api.mockito.PowerMockito;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
@@ -147,7 +145,6 @@ public class InstallPackagesCheckTest {
     final List<HostVersionEntity> hostVersionEntities = new ArrayList<HostVersionEntity>();
     for(String hostName : hostNames) {
       Host host =  Mockito.mock(Host.class);
-      host.setHostName(hostName);
       Mockito.when(host.getHostName()).thenReturn(hostName);
       Mockito.when(host.getMaintenanceState(1L)).thenReturn(MaintenanceState.OFF);
       hosts.add(host);

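Note on the removed host.setHostName(hostName) line: calling a setter on a Mockito mock is a silent no-op, so the line never had any effect; the when(...).thenReturn(...) stub is what makes the mock answer. A sketch of the surviving pattern (helper name is illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.MaintenanceState;

class MockHostSketch {
  // Stubbing is the only way to get data out of a mock; setter invocations
  // are recorded and discarded, which is why setHostName() could be deleted.
  static Host mockHealthyHost(String hostName, long clusterId) {
    Host host = mock(Host.class);
    when(host.getHostName()).thenReturn(hostName);
    when(host.getMaintenanceState(clusterId)).thenReturn(MaintenanceState.OFF);
    return host;
  }
}
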
+ 55 - 67
ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java

@@ -18,12 +18,20 @@
 
 package org.apache.ambari.server.configuration;
 
-import com.google.common.eventbus.EventBus;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.AmbariException;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.agent.HeartbeatTestHelper;
 import org.apache.ambari.server.agent.RecoveryConfig;
 import org.apache.ambari.server.agent.RecoveryConfigHelper;
@@ -40,19 +48,11 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import com.google.common.eventbus.EventBus;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 
 /**
  * Test RecoveryConfigHelper class
@@ -86,7 +86,7 @@ public class RecoveryConfigHelperTest {
   }
 
   @After
-  public void teardown() throws AmbariException {
+  public void teardown() throws Exception {
     injector.getInstance(PersistService.class).stop();
   }
 
@@ -95,7 +95,7 @@ public class RecoveryConfigHelperTest {
    */
   @Test
   public void testRecoveryConfigDefaultValues()
-      throws AmbariException {
+      throws Exception {
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getDefaultRecoveryConfig();
     assertEquals(recoveryConfig.getMaxLifetimeCount(), RecoveryConfigHelper.RECOVERY_LIFETIME_MAX_COUNT_DEFAULT);
     assertEquals(recoveryConfig.getMaxCount(), RecoveryConfigHelper.RECOVERY_MAX_COUNT_DEFAULT);
@@ -107,11 +107,12 @@ public class RecoveryConfigHelperTest {
 
   /**
    * Test cluster-env properties from a dummy cluster
-   * @throws AmbariException
+   *
+   * @throws Exception
    */
   @Test
   public void testRecoveryConfigValues()
-      throws AmbariException {
+      throws Exception {
     String hostname = "hostname1";
     Cluster cluster = getDummyCluster(hostname);
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), hostname);
@@ -124,20 +125,19 @@ public class RecoveryConfigHelperTest {
   }
 
   /**
-   * Install a component with auto start enabled. Verify that the old config was invalidated.
+   * Install a component with auto start enabled. Verify that the old config was
+   * invalidated.
    *
-   * @throws AmbariException
+   * @throws Exception
    */
   @Test
   public void testServiceComponentInstalled()
-      throws AmbariException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     // Get the recovery configuration
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -145,8 +145,7 @@ public class RecoveryConfigHelperTest {
 
     // Install HDFS::NAMENODE to trigger a component installed event
     hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
 
     // Verify that the config is stale now
     boolean isConfigStale = recoveryConfigHelper.isConfigStale(cluster.getClusterName(), DummyHostname1,
@@ -161,23 +160,20 @@ public class RecoveryConfigHelperTest {
 
   /**
    * Uninstall a component and verify that the config is stale.
-   * 
-   * @throws AmbariException
+   *
+   * @throws Exception
    */
   @Test
   public void testServiceComponentUninstalled()
-      throws AmbariException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
 
     // Get the recovery configuration
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -200,18 +196,16 @@ public class RecoveryConfigHelperTest {
   /**
    * Disable cluster level auto start and verify that the config is stale.
    *
-   * @throws AmbariException
+   * @throws Exception
    */
   @Test
   public void testClusterEnvConfigChanged()
-      throws AmbariException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setDesiredState(State.INSTALLED);
 
     // Get the recovery configuration
@@ -238,24 +232,22 @@ public class RecoveryConfigHelperTest {
   }
 
   /**
-   * Change the maintenance mode of a service component host and verify that config is stale.
+   * Change the maintenance mode of a service component host and verify that
+   * the config is stale.
    *
-   * @throws AmbariException
+   * @throws Exception
    */
   @Test
   public void testMaintenanceModeChanged()
-      throws AmbariException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
 
     // Get the recovery configuration
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -277,18 +269,16 @@ public class RecoveryConfigHelperTest {
   /**
    * Disable recovery on a component and verify that the config is stale.
    *
-   * @throws AmbariException
+   * @throws Exception
    */
   @Test
   public void testServiceComponentRecoveryChanged()
-      throws AmbariException {
+      throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
     // Get the recovery configuration
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
@@ -309,14 +299,14 @@ public class RecoveryConfigHelperTest {
   }
 
   /**
-   * Test a cluster with two hosts. The first host gets the configuration during registration.
-   * The second host gets it during it's first heartbeat.
+   * Test a cluster with two hosts. The first host gets the configuration during
+   * registration. The second host gets it during its first heartbeat.
    *
-   * @throws AmbariException
+   * @throws Exception
    */
   @Test
   public void testMultiNodeCluster()
-    throws AmbariException {
+      throws Exception {
     Set<String> hostNames = new HashSet<String>() {{
       add("Host1");
       add("Host2");
@@ -327,14 +317,12 @@ public class RecoveryConfigHelperTest {
 
     // Add HDFS service with DATANODE component to the cluster
     Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
-    hdfs.getServiceComponent(DATANODE).persist();
 
     // Add SCH to Host1 and Host2
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1").persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host2").persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1");
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host2");
 
     // Simulate registration for Host1: Get the recovery configuration right away for Host1.
     // It makes an entry for cluster name and Host1 in the timestamp dictionary.
@@ -351,7 +339,7 @@ public class RecoveryConfigHelperTest {
   }
 
   private Cluster getDummyCluster(Set<String> hostNames)
-    throws AmbariException {
+      throws Exception {
     Map<String, String> configProperties = new HashMap<String, String>() {{
       put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
       put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, "AUTO_START");
@@ -365,7 +353,7 @@ public class RecoveryConfigHelperTest {
   }
 
   private Cluster getDummyCluster(final String hostname)
-          throws AmbariException {
+      throws Exception {
 
     Set<String> hostNames = new HashSet<String>(){{
       add(hostname);

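The dominant change in RecoveryConfigHelperTest (and in most of the test files below) is the removal of explicit persist() calls: services, components, and service component hosts appear to be persisted as part of creation on this branch, making the chained .persist() calls redundant. A hedged sketch of the new setup flow; the implicit-persist behavior is inferred from this diff, not verified here:

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Service;

class ServiceSetupSketch {
  // Builds HDFS with a recoverable DATANODE on one host; no persist() needed.
  static void installDatanode(Cluster cluster, String hostName) throws Exception {
    Service hdfs = cluster.addService("HDFS");
    hdfs.addServiceComponent("DATANODE").setRecoveryEnabled(true);
    hdfs.getServiceComponent("DATANODE").addServiceComponentHost(hostName);
  }
}
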
+ 0 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java

@@ -512,7 +512,6 @@ public class AmbariCustomCommandExecutionHelperTest {
     clusters.addHost(hostname);
     setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
     clusters.getHost(hostname).setState(HostState.HEALTHY);
-    clusters.getHost(hostname).persist();
     if (null != clusterName) {
       clusters.mapHostToCluster(hostname, clusterName);
     }

+ 43 - 142
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.controller;
 
-import org.apache.ambari.server.controller.internal.DeleteStatusMetaData;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
@@ -79,10 +78,10 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.audit.AuditLogger;
-import org.apache.ambari.server.audit.AuditLoggerModule;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.internal.ClusterStackVersionResourceProviderTest;
 import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
+import org.apache.ambari.server.controller.internal.DeleteStatusMetaData;
 import org.apache.ambari.server.controller.internal.HostComponentResourceProviderTest;
 import org.apache.ambari.server.controller.internal.HostResourceProviderTest;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
@@ -171,7 +170,6 @@ import org.springframework.security.core.context.SecurityContextHolder;
 import com.google.common.collect.Lists;
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
-import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
@@ -313,7 +311,6 @@ public class AmbariManagementControllerTest {
       clusters.addHost(hostname);
       setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
       clusters.getHost(hostname).setState(HostState.HEALTHY);
-      clusters.getHost(hostname).persist();
     }
 
     if (null != clusterName) {
@@ -628,8 +625,6 @@ public class AmbariManagementControllerTest {
     clusters.addHost(host2);
     setOsFamily(clusters.getHost(host1), "redhat", "6.3");
     setOsFamily(clusters.getHost(host2), "redhat", "6.3");
-    clusters.getHost(host1).persist();
-    clusters.getHost(host2).persist();
 
     controller.createCluster(r);
     Assert.assertNotNull(clusters.getCluster(cluster1));
@@ -1011,8 +1006,6 @@ public class AmbariManagementControllerTest {
     Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
     c1.addService(s1);
     c1.addService(s2);
-    s1.persist();
-    s2.persist();
 
     set1.clear();
     ServiceComponentRequest valid1 =
@@ -1316,8 +1309,6 @@ public class AmbariManagementControllerTest {
     Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
     c1.addService(s1);
     c1.addService(s2);
-    s1.persist();
-    s2.persist();
 
     Set<ServiceComponentRequest> set1 = new HashSet<ServiceComponentRequest>();
     ServiceComponentRequest valid1 =
@@ -1627,13 +1618,10 @@ public class AmbariManagementControllerTest {
 
     Service s1 = serviceFactory.createNew(foo, "HDFS");
     foo.addService(s1);
-    s1.persist();
     Service s2 = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s2);
-    s2.persist();
     Service s3 = serviceFactory.createNew(c2, "HDFS");
     c2.addService(s3);
-    s3.persist();
 
 
     try {
@@ -1649,13 +1637,10 @@ public class AmbariManagementControllerTest {
 
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "NAMENODE");
     s1.addServiceComponent(sc1);
-    sc1.persist();
     ServiceComponent sc2 = serviceComponentFactory.createNew(s2, "NAMENODE");
     s2.addServiceComponent(sc2);
-    sc2.persist();
     ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "NAMENODE");
     s3.addServiceComponent(sc3);
-    sc3.persist();
 
 
     try {
@@ -1674,19 +1659,16 @@ public class AmbariManagementControllerTest {
     h1.setIPv4("ipv41");
     h1.setIPv6("ipv61");
     setOsFamily(h1, "redhat", "6.3");
-    h1.persist();
     clusters.addHost(host2);
     Host h2 = clusters.getHost(host2);
     h2.setIPv4("ipv42");
     h2.setIPv6("ipv62");
     setOsFamily(h2, "redhat", "6.3");
-    h2.persist();
     clusters.addHost(host3);
     Host h3 = clusters.getHost(host3);
     h3.setIPv4("ipv43");
     h3.setIPv6("ipv63");
     setOsFamily(h3, "redhat", "6.3");
-    h3.persist();
 
     try {
       set1.clear();
@@ -1808,8 +1790,6 @@ public class AmbariManagementControllerTest {
     clusters.addHost(host2);
     setOsFamily(clusters.getHost(host1), "redhat", "5.9");
     setOsFamily(clusters.getHost(host2), "redhat", "5.9");
-    clusters.getHost(host1).persist();
-    clusters.getHost(host2).persist();
 
     HostRequest request = new HostRequest(host2, "foo", new HashMap<String, String>());
     requests.add(request);
@@ -1864,9 +1844,6 @@ public class AmbariManagementControllerTest {
     setOsFamily(clusters.getHost(host1), "redhat", "5.9");
     setOsFamily(clusters.getHost(host2), "redhat", "5.9");
     setOsFamily(clusters.getHost(host3), "redhat", "5.9");
-    clusters.getHost(host1).persist();
-    clusters.getHost(host2).persist();
-    clusters.getHost(host3).persist();
 
     HostRequest r1 = new HostRequest(host1, cluster1, null);
     HostRequest r2 = new HostRequest(host2, cluster1, null);
@@ -2281,8 +2258,6 @@ public class AmbariManagementControllerTest {
     s1.setDesiredStackVersion(new StackId("HDP-0.1"));
     s1.setDesiredState(State.INSTALLED);
 
-    s1.persist();
-
     ServiceRequest r = new ServiceRequest(cluster1, null, null);
     Set<ServiceResponse> resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
 
@@ -2327,12 +2302,6 @@ public class AmbariManagementControllerTest {
     s2.setDesiredState(State.INSTALLED);
     s4.setDesiredState(State.INSTALLED);
 
-    s1.persist();
-    s2.persist();
-    s3.persist();
-    s4.persist();
-    s5.persist();
-
     ServiceRequest r = new ServiceRequest(null, null, null);
     Set<ServiceResponse> resp;
 
@@ -2391,10 +2360,8 @@ public class AmbariManagementControllerTest {
     Service s1 = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s1);
     s1.setDesiredState(State.INSTALLED);
-    s1.persist();
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     s1.addServiceComponent(sc1);
-    sc1.persist();
     sc1.setDesiredStackVersion(new StackId("HDP-0.1"));
     sc1.setDesiredState(State.UNINSTALLED);
 
@@ -2443,12 +2410,6 @@ public class AmbariManagementControllerTest {
     s2.setDesiredState(State.INSTALLED);
     s4.setDesiredState(State.INSTALLED);
 
-    s1.persist();
-    s2.persist();
-    s3.persist();
-    s4.persist();
-    s5.persist();
-
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
     ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
@@ -2478,15 +2439,6 @@ public class AmbariManagementControllerTest {
     sc7.setDesiredState(State.UNINSTALLED);
     sc8.setDesiredState(State.UNINSTALLED);
 
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-    sc4.persist();
-    sc5.persist();
-    sc6.persist();
-    sc7.persist();
-    sc8.persist();
-
     ServiceComponentRequest r = new ServiceComponentRequest(null, null,
         null, null);
 
@@ -2561,11 +2513,9 @@ public class AmbariManagementControllerTest {
     Cluster c1 = setupClusterWithHosts(cluster1, "HDP-0.1", Lists.newArrayList(host1), "centos5");
     Service s1 = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s1);
-    s1.persist();
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     s1.addServiceComponent(sc1);
     sc1.setDesiredState(State.UNINSTALLED);
-    sc1.persist();
     ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, host1);
     sc1.addServiceComponentHost(sch1);
     sch1.setDesiredState(State.INSTALLED);
@@ -2573,8 +2523,6 @@ public class AmbariManagementControllerTest {
     sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
     sch1.setStackVersion(new StackId("HDP-0.1"));
 
-    sch1.persist();
-
     sch1.updateActualConfigs(new HashMap<String, Map<String,String>>() {{
       put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
     }});
@@ -2977,10 +2925,6 @@ public class AmbariManagementControllerTest {
     s1.setDesiredState(State.INSTALLED);
     s2.setDesiredState(State.INSTALLED);
 
-    s1.persist();
-    s2.persist();
-    s3.persist();
-
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
     ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
@@ -2993,10 +2937,6 @@ public class AmbariManagementControllerTest {
     sc1.setDesiredState(State.UNINSTALLED);
     sc3.setDesiredState(State.UNINSTALLED);
 
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-
     ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, host1);
     ServiceComponentHost sch2 = serviceComponentHostFactory.createNew(sc1, host2);
     ServiceComponentHost sch3 = serviceComponentHostFactory.createNew(sc1, host3);
@@ -3016,13 +2956,6 @@ public class AmbariManagementControllerTest {
     sch4.setDesiredState(State.INSTALLED);
     sch5.setDesiredState(State.UNINSTALLED);
 
-    sch1.persist();
-    sch2.persist();
-    sch3.persist();
-    sch4.persist();
-    sch5.persist();
-    sch6.persist();
-
     ServiceComponentHostRequest r =
         new ServiceComponentHostRequest(null, null, null, null, null);
 
@@ -3136,7 +3069,6 @@ public class AmbariManagementControllerTest {
         "centos5");
     clusters.addHost(host4);
     setOsFamily(clusters.getHost(host4), "redhat", "5.9");
-    clusters.getHost(host4).persist();
 
     Map<String, String> attrs = new HashMap<String, String>();
     attrs.put("a1", "b1");
@@ -4018,9 +3950,7 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("testServiceComponentHostUpdateStackId", stages.get(0).getRequestContext());
     Assert.assertEquals(State.UPGRADING, sch1.getState());
     Assert.assertEquals(State.UPGRADING, sch2.getState());
-    sch1.refresh();
     Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
-    sch2.refresh();
     Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
     for (HostRoleCommand command : stages.get(0).getOrderedHostRoleCommands()) {
       ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
@@ -4064,11 +3994,8 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(State.UPGRADING, sch1.getState());
     Assert.assertEquals(State.UPGRADING, sch2.getState());
     Assert.assertEquals(State.UPGRADING, sch3.getState());
-    sch1.refresh();
     Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
-    sch2.refresh();
     Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
-    sch3.refresh();
     Assert.assertTrue(sch3.getDesiredStackVersion().compareTo(newStack) == 0);
     for (Stage stage : stages) {
       for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
@@ -4299,21 +4226,18 @@ public class AmbariManagementControllerTest {
     cluster.addConfig(config3);
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-
     Service mapred = cluster.addService("YARN");
-    mapred.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.addServiceComponent(Role.DATANODE.name());
 
-    mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+    mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
 
     String actionDef1 = getUniqueName();
     String actionDef2 = getUniqueName();
@@ -4468,15 +4392,14 @@ public class AmbariManagementControllerTest {
     cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.addServiceComponent(Role.DATANODE.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
 
     installService(cluster1, "HDFS", false, false);
 
@@ -4578,20 +4501,17 @@ public class AmbariManagementControllerTest {
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-
     Service hive = cluster.addService("HIVE");
-    hive.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.addServiceComponent(Role.DATANODE.name());
 
-    hive.addServiceComponent(Role.HIVE_SERVER.name()).persist();
+    hive.addServiceComponent(Role.HIVE_SERVER.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
 
     Map<String, String> params = new HashMap<String, String>() {{
       put("test", "test");
@@ -4866,14 +4786,12 @@ public class AmbariManagementControllerTest {
 
     Service hdfs = cluster.addService("HDFS");
     Service mapReduce = cluster.addService("MAPREDUCE");
-    hdfs.persist();
-    mapReduce.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2);
 
     Map<String, String> params = new HashMap<String, String>() {{
       put("test", "test");
@@ -6539,21 +6457,18 @@ public class AmbariManagementControllerTest {
     cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-
     Service mapred = cluster.addService("YARN");
-    mapred.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.addServiceComponent(Role.DATANODE.name());
 
-    mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+    mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
 
     String action1 = getUniqueName();
 
@@ -6648,23 +6563,20 @@ public class AmbariManagementControllerTest {
     cluster.addConfig(config2);
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-
     Service mapred = cluster.addService("YARN");
-    mapred.persist();
 
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
+    hdfs.addServiceComponent(Role.NAMENODE.name());
+    hdfs.addServiceComponent(Role.DATANODE.name());
 
-    mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+    mapred.addServiceComponent(Role.RESOURCEMANAGER.name());
 
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2).persist();
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host1);
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(host2);
 
-    mapred.getServiceComponent(Role.RESOURCEMANAGER.name()).addServiceComponentHost(host2).persist();
+    mapred.getServiceComponent(Role.RESOURCEMANAGER.name()).addServiceComponentHost(host2);
 
     Map<String, String> params = new HashMap<String, String>() {{
       put("test", "test");
@@ -8370,7 +8282,6 @@ public class AmbariManagementControllerTest {
         RepositoryVersionState.INSTALLING);
     clusters.addHost(hostName1);
     setOsFamily(clusters.getHost(hostName1), "redhat", "5.9");
-    clusters.getHost(hostName1).persist();
 
     clusters.mapHostsToCluster(new HashSet<String>(){
       {add(hostName1);}}, cluster1);
@@ -9346,13 +9257,10 @@ public class AmbariManagementControllerTest {
       clusters.addHost("host3");
       Host host = clusters.getHost("host1");
       setOsFamily(host, "redhat", "6.3");
-      host.persist();
       host = clusters.getHost("host2");
       setOsFamily(host, "redhat", "6.3");
-      host.persist();
       host = clusters.getHost("host3");
       setOsFamily(host, "redhat", "6.3");
-      host.persist();
 
       ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, "HDP-1.2.0", null);
       amc.createCluster(clusterRequest);
@@ -9396,13 +9304,11 @@ public class AmbariManagementControllerTest {
       Host host = clusters.getHost(HOST1);
       setOsFamily(host, "redhat", "6.3");
       clusters.getHost(HOST1).setState(HostState.HEALTHY);
-      host.persist();
 
       clusters.addHost(HOST2);
       host = clusters.getHost(HOST2);
       setOsFamily(host, "redhat", "6.3");
       clusters.getHost(HOST1).setState(HostState.HEALTHY);
-      host.persist();
 
       AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
 
@@ -9492,13 +9398,10 @@ public class AmbariManagementControllerTest {
     clusters.addHost(host3);
     Host host = clusters.getHost("host1");
     setOsFamily(host, "redhat", "5.9");
-    host.persist();
     host = clusters.getHost("host2");
     setOsFamily(host, "redhat", "5.9");
-    host.persist();
     host = clusters.getHost("host3");
     setOsFamily(host, "redhat", "5.9");
-    host.persist();
 
     ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, "HDP-1.2.0", null);
     amc.createCluster(clusterRequest);
@@ -9797,7 +9700,6 @@ public class AmbariManagementControllerTest {
     clusters.addHost(HOST1);
     Host host = clusters.getHost(HOST1);
     setOsFamily(host, "redhat", "5.9");
-    host.persist();
 
     ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
     amc.createCluster(clusterRequest);
@@ -10120,7 +10022,6 @@ public class AmbariManagementControllerTest {
       "centos5");
 
     Service hdfs = c1.addService("HDFS");
-    hdfs.persist();
     createServiceComponent(cluster1, "HDFS", "NAMENODE", State.INIT);
     createServiceComponent(cluster1, "HDFS", "DATANODE", State.INIT);
     createServiceComponent(cluster1, "HDFS", "HDFS_CLIENT", State.INIT);

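Two smaller patterns recur throughout AmbariManagementControllerTest above: host setup no longer ends with clusters.getHost(hostname).persist(), and assertions no longer call sch.refresh() before reading state, presumably because the refactored ServiceComponentHost keeps its in-memory view current. A sketch of the assertion as it now reads (StackId and the compareTo idiom are taken from the hunks above):

import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.junit.Assert;

class StackAssertionSketch {
  // previously: sch.refresh(); was required before the comparison
  static void assertDesiredStack(ServiceComponentHost sch, StackId expected) {
    Assert.assertTrue(sch.getDesiredStackVersion().compareTo(expected) == 0);
  }
}
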
+ 0 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java

@@ -178,7 +178,6 @@ public class BackgroundCustomCommandExecutionTest {
     clusters.addHost(hostname);
     setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
     clusters.getHost(hostname).setState(HostState.HEALTHY);
-    clusters.getHost(hostname).persist();
     if (null != clusterName) {
       clusters.mapHostToCluster(hostname, clusterName);
     }

+ 41 - 39
ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java

@@ -18,10 +18,42 @@
 
 package org.apache.ambari.server.controller;
 
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.getCurrentArguments;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.lang.reflect.Method;
+import java.net.InetAddress;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.actionmanager.ActionManager;
@@ -76,7 +108,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
@@ -96,45 +127,16 @@ import org.easymock.CaptureType;
 import org.easymock.EasyMockSupport;
 import org.easymock.IAnswer;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
-import javax.persistence.EntityManager;
-import java.io.File;
-import java.lang.reflect.Method;
-import java.net.InetAddress;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
 
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.getCurrentArguments;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 
 @SuppressWarnings("unchecked")
 public class KerberosHelperTest extends EasyMockSupport {
@@ -227,7 +229,7 @@ public class KerberosHelperTest extends EasyMockSupport {
         bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
         bind(StageFactory.class).toInstance(createNiceMock(StageFactory.class));
         bind(RoleGraphFactory.class).to(RoleGraphFactoryImpl.class);
-        bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+        bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
         bind(ConfigHelper.class).toInstance(createNiceMock(ConfigHelper.class));
         bind(KerberosOperationHandlerFactory.class).toInstance(kerberosOperationHandlerFactory);
         bind(ClusterController.class).toInstance(clusterController);

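The one functional change in KerberosHelperTest is the Guice binding above: with ClustersImpl now requiring constructor arguments, the test mocks the Clusters interface instead of the implementation class. A minimal module sketch of that binding:

import static org.easymock.EasyMock.createNiceMock;

import org.apache.ambari.server.state.Clusters;

import com.google.inject.AbstractModule;

class ClustersBindingSketch extends AbstractModule {
  @Override
  protected void configure() {
    // Mocking the interface avoids instantiating (or cglib-proxying) the
    // concrete ClustersImpl, which now has required constructor parameters.
    bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
  }
}
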
+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java

@@ -24,8 +24,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
 import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
@@ -46,11 +44,13 @@ import org.apache.ambari.server.state.State;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.springframework.security.core.context.SecurityContextHolder;
 
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
-import org.springframework.security.core.context.SecurityContextHolder;
+
+import junit.framework.Assert;
 
 @SuppressWarnings("serial")
 public class RefreshYarnCapacitySchedulerReleaseConfigTest {
@@ -177,7 +177,6 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
     clusters.addHost(hostname);
     setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
     clusters.getHost(hostname).setState(HostState.HEALTHY);
-    clusters.getHost(hostname).persist();
     if (null != clusterName) {
       clusters.mapHostToCluster(hostname, clusterName);
     }

+ 5 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java

@@ -255,7 +255,10 @@ public class ClusterResourceProviderTest {
 
   @Test
   public void testCreateResourcesWithRetry() throws Exception {
-    RetryHelper.init(3);
+    Clusters clusters = createMock(Clusters.class);
+    EasyMock.replay(clusters);
+
+    RetryHelper.init(clusters, 3);
     Resource.Type type = Resource.Type.Cluster;
 
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
@@ -309,7 +312,7 @@ public class ClusterResourceProviderTest {
     // verify
     verify(managementController, response);
 
-    RetryHelper.init(0);
+    RetryHelper.init(clusters, 0);
 
   }
 

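RetryHelper.init() now takes the Clusters instance it should consult in addition to the retry count. A sketch of the new call sequence from the test above; the org.apache.ambari.server.utils package for RetryHelper is assumed, and the try/finally reset keeps retry state from leaking into other tests:

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.replay;

import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.utils.RetryHelper;

class RetryInitSketch {
  static void withRetries(Runnable body) {
    Clusters clusters = createMock(Clusters.class);
    replay(clusters);
    RetryHelper.init(clusters, 3);    // allow up to 3 retries for cluster ops
    try {
      body.run();
    } finally {
      RetryHelper.init(clusters, 0);  // disable retries again
    }
  }
}
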
+ 0 - 6
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java

@@ -170,14 +170,12 @@ public class JMXHostProviderTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "5.9");
     clusters.getHost("h1").setHostAttributes(hostAttributes);
-    clusters.getHost("h1").persist();
     String host2 = "h2";
     clusters.addHost(host2);
     hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
     clusters.getHost("h2").setHostAttributes(hostAttributes);
-    clusters.getHost("h2").persist();
     clusters.mapHostToCluster(host1, clusterName);
     clusters.mapHostToCluster(host2, clusterName);
 
@@ -262,14 +260,12 @@ public class JMXHostProviderTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "5.9");
     clusters.getHost("h1").setHostAttributes(hostAttributes);
-    clusters.getHost("h1").persist();
     String host2 = "h2";
     clusters.addHost(host2);
     hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
     clusters.getHost("h2").setHostAttributes(hostAttributes);
-    clusters.getHost("h2").persist();
     clusters.mapHostToCluster(host1, clusterName);
     clusters.mapHostToCluster(host2, clusterName);
 
@@ -365,14 +361,12 @@ public class JMXHostProviderTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "5.9");
     clusters.getHost("h1").setHostAttributes(hostAttributes);
-    clusters.getHost("h1").persist();
     String host2 = "h2";
     clusters.addHost(host2);
     hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
     clusters.getHost("h2").setHostAttributes(hostAttributes);
-    clusters.getHost("h2").persist();
     clusters.mapHostToCluster(host1, clusterName);
     clusters.mapHostToCluster(host2, clusterName);
 

+ 16 - 36
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PreUpgradeCheckResourceProviderTest.java

@@ -18,32 +18,40 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionDBAccessor;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.checks.AbstractCheckDescriptor;
 import org.apache.ambari.server.checks.UpgradeCheckRegistry;
 import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.events.jpa.EntityManagerCacheInvalidationEvent;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.scheduler.ExecutionScheduler;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.CheckHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.ServiceInfo;
@@ -56,41 +64,14 @@ import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
-import org.easymock.EasyMock;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.io.File;
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import javax.persistence.EntityManager;
-
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.Provider;
 
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
 /**
  * PreUpgradeCheckResourceProvider tests.
  */
@@ -126,7 +107,6 @@ public class PreUpgradeCheckResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
 
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
     expect(cluster.getServices()).andReturn(allServiceMap).anyTimes();

+ 28 - 35
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java

@@ -18,6 +18,31 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.isNull;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -57,31 +82,6 @@ import org.junit.Test;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.context.SecurityContextHolder;
 
-import java.lang.reflect.Field;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.anyBoolean;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.isNull;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
 /**
  * ServiceResourceProvider tests.
  */
@@ -117,9 +117,8 @@ public class ServiceResourceProviderTest {
 
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo);
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory);
 
-    expect(serviceFactory.createNew(cluster, "Service100")).andReturn(service);
+    expect(cluster.addService("Service100")).andReturn(service);
 
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
 
@@ -202,7 +201,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
     expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
         andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
 
@@ -321,7 +319,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
     expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
         andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
 
@@ -390,7 +387,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
     expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
         andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
 
@@ -458,7 +454,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
     expect(managementController.getHostComponents(EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).
         andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
 
@@ -528,7 +523,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
     expect(managementController.getHostComponents((Set<ServiceComponentHostRequest>) anyObject())).
         andReturn(Collections.<ServiceComponentHostResponse>emptySet()).anyTimes();
 
@@ -612,7 +606,6 @@ public class ServiceResourceProviderTest {
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
 
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
 
@@ -824,7 +817,7 @@ public class ServiceResourceProviderTest {
     Clusters clusters = createNiceMock(Clusters.class);
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
-    
+
     String serviceName = "Service100";
 
     // set expectations
@@ -912,7 +905,7 @@ public class ServiceResourceProviderTest {
 
     // verify
     verify(managementController, clusters, cluster, service);
-  }  
+  }
 
   @Test
   public void testDeleteResourcesBadComponentState() throws Exception{

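ServiceResourceProviderTest captures the API shift directly: the provider no longer asks the management controller for a ServiceFactory, and services are created through Cluster.addService(String). The expectation swap, isolated as a sketch (EasyMock, helper name illustrative):

import static org.easymock.EasyMock.expect;

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Service;

class AddServiceExpectationSketch {
  // old: expect(serviceFactory.createNew(cluster, "Service100")).andReturn(service);
  static void expectCreation(Cluster cluster, Service service) throws Exception {
    expect(cluster.addService("Service100")).andReturn(service);
  }
}
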
+ 0 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java

@@ -146,7 +146,6 @@ public class StackDefinedPropertyProviderTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     clusters.mapHostToCluster("h1", "c2");
 

+ 0 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java

@@ -174,14 +174,12 @@ public class UpgradeResourceProviderHDP22Test {
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
     host.setState(HostState.HEALTHY);
-    host.persist();
 
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single HIVE server
     Service service = cluster.addService("HIVE");
     service.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    service.persist();
 
     ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");

+ 0 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java

@@ -232,14 +232,12 @@ public class UpgradeResourceProviderTest {
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
     host.setState(HostState.HEALTHY);
-    host.persist();
 
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single ZK server
     Service service = cluster.addService("ZOOKEEPER");
     service.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    service.persist();
 
     ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");
@@ -590,7 +588,6 @@ public class UpgradeResourceProviderTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     clusters.mapHostToCluster("h2", "c1");
     Cluster cluster = clusters.getCluster("c1");
@@ -736,7 +733,6 @@ public class UpgradeResourceProviderTest {
     // add additional service for the test
     Service service = cluster.addService("HIVE");
     service.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    service.persist();
 
     ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");

+ 24 - 24
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java

@@ -17,15 +17,21 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.persist.PersistService;
-import com.google.inject.persist.Transactional;
-import com.google.inject.util.Modules;
-import junit.framework.Assert;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyString;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
@@ -74,20 +80,16 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.Transactional;
+import com.google.inject.util.Modules;
 
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import junit.framework.Assert;
 
 /**
  * UpgradeSummaryResourceProvider tests.
@@ -170,14 +172,12 @@ public class UpgradeSummaryResourceProviderTest {
     hostAttributes.put("os_release_version", "6.4");
     host.setHostAttributes(hostAttributes);
     host.setState(HostState.HEALTHY);
-    host.persist();
 
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single ZOOKEEPER server
     Service service = cluster.addService("ZOOKEEPER");
     service.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    service.persist();
 
     ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");

+ 3 - 3
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java

@@ -19,14 +19,15 @@
 package org.apache.ambari.server.controller.utilities.state;
 
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
-import java.util.HashMap;
-import java.util.Map;
 
 public final class DefaultServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest {
 
@@ -54,7 +55,6 @@ public final class DefaultServiceCalculatedStateTest extends GeneralServiceCalcu
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java

@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.controller.utilities.state;
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class FlumeServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
   @Override
   protected String getServiceName() {
@@ -53,7 +53,6 @@ public class FlumeServiceCalculatedStateTest extends GeneralServiceCalculatedSta
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);

+ 24 - 23
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java

@@ -18,30 +18,32 @@
 
 package org.apache.ambari.server.controller.utilities.state;
 
-  import com.google.inject.Binder;
-  import com.google.inject.Guice;
-  import com.google.inject.Inject;
-  import com.google.inject.Injector;
-  import com.google.inject.Module;
-  import com.google.inject.persist.PersistService;
-  import com.google.inject.util.Modules;
-  import org.apache.ambari.server.actionmanager.ActionManager;
-  import org.apache.ambari.server.orm.GuiceJpaInitializer;
-  import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-  import org.apache.ambari.server.state.Cluster;
-  import org.apache.ambari.server.state.Clusters;
-  import org.apache.ambari.server.state.Service;
-  import org.apache.ambari.server.state.ServiceComponent;
-  import org.apache.ambari.server.state.ServiceComponentHost;
-  import org.apache.ambari.server.state.StackId;
-  import org.apache.ambari.server.state.State;
-  import org.apache.ambari.server.topology.TopologyManager;
-  import org.apache.ambari.server.utils.StageUtils;
-  import org.junit.After;
-  import org.junit.Before;
-  import org.junit.Test;
   import java.util.Map;
 
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.persist.PersistService;
+import com.google.inject.util.Modules;
+
 public abstract class GeneralServiceCalculatedStateTest {
 
   final protected String[] hosts = {"h1", "h2"};
@@ -76,7 +78,6 @@ public abstract class GeneralServiceCalculatedStateTest {
 
     service = cluster.addService(getServiceName());
     service.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    service.persist();
 
     createComponentsAndHosts();
 

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java

@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.controller.utilities.state;
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class HBaseServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
   @Override
   protected String getServiceName() {
@@ -54,7 +54,6 @@ public class HBaseServiceCalculatedStateTest extends GeneralServiceCalculatedSta
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java

@@ -19,6 +19,9 @@
 package org.apache.ambari.server.controller.utilities.state;
 
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
   @Override
   protected String getServiceName() {
@@ -55,7 +55,6 @@ public class HDFSServiceCalculatedStateTest extends GeneralServiceCalculatedStat
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = masterComponent.addServiceComponentHost(hostName);

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java

@@ -19,6 +19,9 @@
 package org.apache.ambari.server.controller.utilities.state;
 
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class HiveServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
   @Override
   protected String getServiceName() {
@@ -57,7 +57,6 @@ public class HiveServiceCalculatedStateTest extends GeneralServiceCalculatedStat
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java

@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.controller.utilities.state;
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -25,9 +28,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class OozieServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest{
   @Override
   protected String getServiceName() {
@@ -53,7 +53,6 @@ public class OozieServiceCalculatedStateTest extends GeneralServiceCalculatedSta
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = clientComponent.addServiceComponentHost(hostName);

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java

@@ -19,6 +19,9 @@
 package org.apache.ambari.server.controller.utilities.state;
 
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -26,9 +29,6 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.State;
 import org.junit.Assert;
 
-import java.util.HashMap;
-import java.util.Map;
-
 public class YarnServiceCalculatedStateTest extends GeneralServiceCalculatedStateTest {
   @Override
   protected String getServiceName() {
@@ -55,7 +55,6 @@ public class YarnServiceCalculatedStateTest extends GeneralServiceCalculatedStat
       hostAttributes.put("os_release_version", "6.3");
       host.setHostAttributes(hostAttributes);
       host.setState(HostState.HEALTHY);
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
 
       ServiceComponentHost sch = secondMasterComponent.addServiceComponentHost(hostName);

+ 0 - 5
ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java

@@ -110,7 +110,6 @@ public class EventsTest {
     hostAttributes.put("os_release_version", "6.4");
     host.setHostAttributes(hostAttributes);
     host.setState(HostState.HEALTHY);
-    host.persist();
 
     m_cluster = m_clusters.getCluster(m_clusterName);
     Assert.assertNotNull(m_cluster);
@@ -382,14 +381,12 @@ public class EventsTest {
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);
-    service.persist();
     service = m_cluster.getService(serviceName);
     Assert.assertNotNull(service);
 
     ServiceComponent component = m_componentFactory.createNew(service, "DATANODE");
     service.addServiceComponent(component);
     component.setDesiredState(State.INSTALLED);
-    component.persist();
 
     ServiceComponentHost sch = m_schFactory.createNew(component, HOSTNAME);
 
@@ -398,7 +395,5 @@ public class EventsTest {
     sch.setState(State.INSTALLED);
     sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     sch.setStackVersion(new StackId("HDP-2.0.6"));
-
-    sch.persist();
   }
 }

+ 0 - 3
ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java

@@ -387,7 +387,6 @@ public class HostVersionOutOfSyncListenerTest {
     addHost("h2");
     clusters.mapHostToCluster("h2", "c1");
     clusters.getHost("h2").setState(HostState.HEALTHY);
-    clusters.getHost("h2").persist();
 
     StackId stackId = new StackId(this.stackId);
     RepositoryVersionEntity repositoryVersionEntity = helper.getOrCreateRepositoryVersion(stackId,
@@ -445,8 +444,6 @@ public class HostVersionOutOfSyncListenerTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.4");
     host1.setHostAttributes(hostAttributes);
-
-    host1.persist();
   }
 
   private void addService(Cluster cl, List<String> hostList,

+ 17 - 17
ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java

@@ -18,12 +18,11 @@
 
 package org.apache.ambari.server.orm;
 
-import javax.persistence.EntityManager;
-import junit.framework.Assert;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import java.lang.reflect.Method;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -35,6 +34,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
 
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -88,6 +89,7 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.alert.Scope;
 import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.inject.Inject;
@@ -96,6 +98,8 @@ import com.google.inject.Provider;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
 
+import junit.framework.Assert;
+
 @Singleton
 public class OrmTestHelper {
 
@@ -308,7 +312,7 @@ public class OrmTestHelper {
    * @return the cluster ID.
    */
   @Transactional
-  public Long createCluster() {
+  public Long createCluster() throws Exception {
     return createCluster(CLUSTER_NAME);
   }
 
@@ -318,7 +322,7 @@ public class OrmTestHelper {
    * @return the cluster ID.
    */
   @Transactional
-  public Long createCluster(String clusterName) {
+  public Long createCluster(String clusterName) throws Exception {
     // required to populate the database with stacks
     injector.getInstance(AmbariMetaInfo.class);
 
@@ -354,6 +358,15 @@ public class OrmTestHelper {
     clusterEntity = clusterDAO.findByName(clusterEntity.getClusterName());
     assertNotNull(clusterEntity);
     assertTrue(clusterEntity.getClusterId() > 0);
+
+    // because this test method goes around the Clusters business object, we
+    // will forcefully refresh the internal state so that any tests which
+    // incorrectly use Clusters after calling this won't be affected
+    Clusters clusters = injector.getInstance(Clusters.class);
+    Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
+    method.setAccessible(true);
+    method.invoke(clusters);
+
     return clusterEntity.getClusterId();
   }
 
@@ -397,7 +410,6 @@ public class OrmTestHelper {
     hostAttributes.put("os_release_version", "6.4");
     host.setHostAttributes(hostAttributes);
     host.setState(HostState.HEALTHY);
-    host.persist();
 
     clusters.mapHostToCluster(hostName, cluster.getClusterName());
   }
@@ -407,7 +419,6 @@ public class OrmTestHelper {
     ServiceComponent serviceComponent = service.getServiceComponent(componentName);
     ServiceComponentHost serviceComponentHost = serviceComponent.addServiceComponentHost(hostName);
     serviceComponentHost.setDesiredState(State.INSTALLED);
-    serviceComponentHost.persist();
   }
 
   /**
@@ -419,7 +430,6 @@ public class OrmTestHelper {
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
     String serviceName = "HDFS";
     Service service = serviceFactory.createNew(cluster, serviceName);
-    service.persist();
     service = cluster.getService(serviceName);
     assertNotNull(service);
 
@@ -427,7 +437,6 @@ public class OrmTestHelper {
 
     service.addServiceComponent(datanode);
     datanode.setDesiredState(State.INSTALLED);
-    datanode.persist();
 
     ServiceComponentHost sch = schFactory.createNew(datanode, hostName);
 
@@ -437,13 +446,10 @@ public class OrmTestHelper {
     sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     sch.setStackVersion(new StackId("HDP-2.0.6"));
 
-    sch.persist();
-
     ServiceComponent namenode = componentFactory.createNew(service, "NAMENODE");
 
     service.addServiceComponent(namenode);
     namenode.setDesiredState(State.INSTALLED);
-    namenode.persist();
 
     sch = schFactory.createNew(namenode, hostName);
     namenode.addServiceComponentHost(sch);
@@ -451,8 +457,6 @@ public class OrmTestHelper {
     sch.setState(State.INSTALLED);
     sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     sch.setStackVersion(new StackId("HDP-2.0.6"));
-
-    sch.persist();
   }
 
   /**
@@ -464,7 +468,6 @@ public class OrmTestHelper {
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
     String serviceName = "YARN";
     Service service = serviceFactory.createNew(cluster, serviceName);
-    service.persist();
     service = cluster.getService(serviceName);
     assertNotNull(service);
 
@@ -473,7 +476,6 @@ public class OrmTestHelper {
 
     service.addServiceComponent(resourceManager);
     resourceManager.setDesiredState(State.INSTALLED);
-    resourceManager.persist();
 
     ServiceComponentHost sch = schFactory.createNew(resourceManager, hostName);
 
@@ -482,8 +484,6 @@ public class OrmTestHelper {
     sch.setState(State.INSTALLED);
     sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     sch.setStackVersion(new StackId("HDP-2.0.6"));
-
-    sch.persist();
   }
 
   /**

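Two coupled changes run through OrmTestHelper above: createCluster() writes entities through the DAOs, bypassing the Clusters business object, so it now reflectively re-invokes ClustersImpl.loadClustersAndHosts() to keep the in-memory cache coherent, and it declares throws Exception, which ripples into every caller (see ClusterVersionDAOTest below). Isolated, the reflective refresh is the standard private-method-invocation idiom; the names come from the hunk above, not from a public API:

    import java.lang.reflect.Method;

    import org.apache.ambari.server.state.Clusters;
    import org.apache.ambari.server.state.cluster.ClustersImpl;

    import com.google.inject.Injector;

    final class ClustersCacheRefreshSketch {
      // loadClustersAndHosts() is private, hence setAccessible(true); the
      // reflection calls throw checked exceptions, which is why
      // createCluster() now declares "throws Exception".
      static void refreshClustersCache(Injector injector) throws Exception {
        Clusters clusters = injector.getInstance(Clusters.class);
        Method method = ClustersImpl.class.getDeclaredMethod("loadClustersAndHosts");
        method.setAccessible(true);
        method.invoke(clusters);
      }
    }
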
+ 6 - 6
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java

@@ -73,7 +73,7 @@ public class ClusterVersionDAOTest {
    * Helper function to transition the cluster through several cluster versions.
    * @param currStep Step to go to is a value from 1 - 7, inclusive.
    */
-  private void createRecordsUntilStep(int currStep) {
+  private void createRecordsUntilStep(int currStep) throws Exception {
     // Fresh install on A
     if (currStep >= 1 && lastStep <= 0) {
       clusterId = helper.createCluster();
@@ -147,7 +147,7 @@ public class ClusterVersionDAOTest {
   }
 
   @Test
-  public void testFindByStackAndVersion() {
+  public void testFindByStackAndVersion() throws Exception {
     createRecordsUntilStep(1);
     Assert.assertEquals(
         0,
@@ -161,14 +161,14 @@ public class ClusterVersionDAOTest {
   }
 
   @Test
-  public void testFindByCluster() {
+  public void testFindByCluster() throws Exception {
     createRecordsUntilStep(1);
     Assert.assertEquals(0, clusterVersionDAO.findByCluster("non existing").size());
     Assert.assertEquals(1, clusterVersionDAO.findByCluster(cluster.getClusterName()).size());
   }
 
   @Test
-  public void testFindByClusterAndStackAndVersion() {
+  public void testFindByClusterAndStackAndVersion() throws Exception {
     createRecordsUntilStep(1);
     Assert.assertNull(clusterVersionDAO.findByClusterAndStackAndVersion(
         cluster.getClusterName(), BAD_STACK, "non existing"));
@@ -181,7 +181,7 @@ public class ClusterVersionDAOTest {
    * At all times the cluster should have a cluster version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}
    */
   @Test
-  public void testFindByClusterAndStateCurrent() {
+  public void testFindByClusterAndStateCurrent() throws Exception {
     createRecordsUntilStep(1);
     Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
 
@@ -208,7 +208,7 @@ public class ClusterVersionDAOTest {
    * Test the state of certain cluster versions.
    */
   @Test
-  public void testFindByClusterAndState() {
+  public void testFindByClusterAndState() throws Exception {
     createRecordsUntilStep(1);
     Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
     Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());

+ 8 - 25
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ConfigGroupDAOTest.java

@@ -22,9 +22,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 
-import com.google.inject.assistedinject.AssistedInject;
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -36,10 +33,9 @@ import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.junit.After;
 import org.junit.Before;
@@ -49,6 +45,8 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
+import junit.framework.Assert;
+
 public class ConfigGroupDAOTest {
   private Injector injector;
   private ConfigGroupDAO configGroupDAO;
@@ -88,28 +86,13 @@ public class ConfigGroupDAOTest {
   private ConfigGroupEntity createConfigGroup(String clusterName,
          String groupName, String tag, String desc, List<HostEntity> hosts,
          List<ClusterConfigEntity> configs) throws Exception {
-    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
-
-    // create an admin resource to represent this cluster
-    ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
-    if (resourceTypeEntity == null) {
-      resourceTypeEntity = new ResourceTypeEntity();
-      resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-      resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-    }
-
+    Clusters clusters = injector.getInstance(Clusters.class);
     StackEntity stackEntity = stackDAO.find("HDP", "0.1");
 
-    ResourceEntity resourceEntity = new ResourceEntity();
-    resourceEntity.setResourceType(resourceTypeEntity);
+    clusters.addCluster(clusterName, new StackId(stackEntity));
+    ClusterEntity clusterEntity = clusterDAO.findByName(clusterName);
 
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName(clusterName);
-    clusterEntity.setResource(resourceEntity);
-    clusterEntity.setDesiredStack(stackEntity);
-
-    clusterDAO.create(clusterEntity);
+    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
 
     configGroupEntity.setClusterEntity(clusterEntity);
     configGroupEntity.setClusterId(clusterEntity.getClusterId());

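ConfigGroupDAOTest stops hand-assembling the ResourceTypeEntity/ResourceEntity/ClusterEntity graph and instead asks the Clusters business object to create the cluster, reading the entity back through the DAO only for the config group under test. Condensed, the new path is the four calls visible in the hunk (injector, stackDAO, clusterDAO, and clusterName are the test's existing fields and parameters):

    // Clusters.addCluster() creates the cluster and its backing admin
    // resource in one step, so the test no longer builds that entity
    // graph by hand.
    Clusters clusters = injector.getInstance(Clusters.class);
    StackEntity stackEntity = stackDAO.find("HDP", "0.1");
    clusters.addCluster(clusterName, new StackId(stackEntity));
    ClusterEntity clusterEntity = clusterDAO.findByName(clusterName);
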
+ 11 - 16
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAOTest.java

@@ -18,22 +18,19 @@
 
 package org.apache.ambari.server.orm.dao;
 
-import com.google.inject.Provider;
-import junit.framework.Assert;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-
-import java.util.Collection;
-import java.util.HashSet;
-
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.junit.Test;
+
+import com.google.inject.Provider;
+
 /**
  * HostComponentDesiredStateDAO tests.
  */
@@ -46,19 +43,17 @@ public class HostComponentDesiredStateDAOTest {
     EntityManager entityManager = createNiceMock(EntityManager.class);
     HostDAO hostDAO = createNiceMock(HostDAO.class);
     HostEntity hostEntity = createNiceMock(HostEntity.class);
-    
+
     HostComponentDesiredStateEntity hostComponentDesiredStateEntity = createNiceMock(HostComponentDesiredStateEntity.class);
-    expect(hostComponentDesiredStateEntity.getHostId()).andReturn(1L).anyTimes();
     expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
 
-    expect(entityManager.merge(hostComponentDesiredStateEntity)).andReturn(hostComponentDesiredStateEntity).anyTimes();
     entityManager.remove(hostComponentDesiredStateEntity);
 
     hostEntity.removeHostComponentDesiredStateEntity(hostComponentDesiredStateEntity);
 
-    expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).anyTimes();
+    expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).atLeastOnce();
 
-    expect(hostDAO.findById(1L)).andReturn(hostEntity).anyTimes();
+    expect(hostComponentDesiredStateEntity.getHostEntity()).andReturn(hostEntity).atLeastOnce();
 
     replay(entityManagerProvider, entityManager, hostDAO, hostEntity, hostComponentDesiredStateEntity);
 

+ 8 - 20
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java

@@ -18,21 +18,19 @@
 
 package org.apache.ambari.server.orm.dao;
 
-import com.google.inject.Provider;
-import junit.framework.Assert;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-import java.util.Collection;
-import java.util.HashSet;
-
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.junit.Test;
+
+import com.google.inject.Provider;
+
 /**
  * HostComponentStateDAO tests.
  */
@@ -48,22 +46,12 @@ public class HostComponentStateDAOTest {
 
     HostComponentStateEntity hostComponentStateEntity = createNiceMock(HostComponentStateEntity.class);
 
-    expect(hostComponentStateEntity.getHostName()).andReturn("host1");
-    expect(hostDAO.findByName("host1")).andReturn(hostEntity);
     expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
 
-
-    expect(entityManager.merge(hostComponentStateEntity)).andReturn(hostComponentStateEntity).anyTimes();
     entityManager.remove(hostComponentStateEntity);
 
-
-    hostEntity.removeHostComponentStateEntity(hostComponentStateEntity);
-
-    expect(hostDAO.merge(hostEntity)).andReturn(hostEntity).anyTimes();
-
     replay(entityManagerProvider, entityManager, hostDAO, hostEntity, hostComponentStateEntity);
 
-
     HostComponentStateDAO dao = new HostComponentStateDAO();
     dao.entityManagerProvider = entityManagerProvider;
     dao.hostDAO = hostDAO;

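Taken together, the two DAO tests above pin down the refactored delete paths: HostComponentDesiredStateDAO.remove() is now expected to navigate entity.getHostEntity() directly (no more getHostId()/findById() round-trip), detach the entity from its host, and merge the host at least once, while HostComponentStateDAO.remove() no longer touches the host at all. A sketch of the desired-state removal those mock expectations imply; this is inferred from the test, not copied from the DAO:

    import javax.persistence.EntityManager;

    import org.apache.ambari.server.orm.dao.HostDAO;
    import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
    import org.apache.ambari.server.orm.entities.HostEntity;

    import com.google.inject.Provider;
    import com.google.inject.persist.Transactional;

    final class DesiredStateRemovalSketch {
      Provider<EntityManager> entityManagerProvider;
      HostDAO hostDAO;

      @Transactional
      void remove(HostComponentDesiredStateEntity entity) {
        // navigate the relationship instead of re-querying the host by id
        HostEntity hostEntity = entity.getHostEntity();
        hostEntity.removeHostComponentDesiredStateEntity(entity);
        hostDAO.merge(hostEntity);                  // atLeastOnce() in the test
        entityManagerProvider.get().remove(entity); // entityManager.remove(...)
      }
    }
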
+ 3 - 3
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAOTest.java

@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.orm.dao;
 
+import java.util.UUID;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -38,8 +40,6 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
-import java.util.UUID;
-
 /**
  * RepositoryVersionDAO unit tests.
  */
@@ -174,7 +174,7 @@ public class RepositoryVersionDAOTest {
   }
 
   @Test
-  public void testDeleteCascade() {
+  public void testDeleteCascade() throws Exception {
     long clusterId = helper.createCluster();
     ClusterEntity cluster = clusterDAO.findById(clusterId);
     createSingleRecord();

+ 10 - 9
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/SettingDAOTest.java

@@ -17,8 +17,13 @@
  */
 package org.apache.ambari.server.orm.dao;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNull;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -26,19 +31,15 @@ import org.apache.ambari.server.orm.entities.SettingEntity;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNull;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
 
 public class SettingDAOTest {
   private  Injector injector;
   private SettingDAO dao;
 
   @Before
-  public void setUp() {
+  public void setUp() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     dao = injector.getInstance(SettingDAO.class);
     injector.getInstance(GuiceJpaInitializer.class);

+ 4 - 4
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetDAOTest.java

@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.orm.dao;
 
+import java.util.LinkedList;
+import java.util.List;
+
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -33,9 +36,6 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
-import java.util.LinkedList;
-import java.util.List;
-
 /**
  * WidgetDAO unit tests.
  */
@@ -49,7 +49,7 @@ public class WidgetDAOTest {
 
 
   @Before
-  public void before() {
+  public void before() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     widgetDAO = injector.getInstance(WidgetDAO.class);
     widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);

+ 7 - 6
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/WidgetLayoutDAOTest.java

@@ -18,9 +18,9 @@
 
 package org.apache.ambari.server.orm.dao;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import java.util.LinkedList;
+import java.util.List;
+
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -32,8 +32,9 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.LinkedList;
-import java.util.List;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 
 /**
  * WidgetLayoutDAO unit tests.
@@ -48,7 +49,7 @@ public class WidgetLayoutDAOTest {
 
 
   @Before
-  public void before() {
+  public void before() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     widgetLayoutDAO = injector.getInstance(WidgetLayoutDAO.class);
     widgetDAO = injector.getInstance(WidgetDAO.class);

+ 0 - 5
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java

@@ -152,7 +152,6 @@ public class ComponentVersionCheckActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     // Create the starting repo version
     m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -208,7 +207,6 @@ public class ComponentVersionCheckActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     clusters.mapHostToCluster(hostName, clusterName);
 
@@ -365,7 +363,6 @@ public class ComponentVersionCheckActionTest {
     sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
     sch.setStackVersion(cluster.getCurrentStackVersion());
 
-    sch.persist();
     return sch;
   }
 
@@ -377,7 +374,6 @@ public class ComponentVersionCheckActionTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -392,7 +388,6 @@ public class ComponentVersionCheckActionTest {
       serviceComponent = serviceComponentFactory.createNew(service, componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;

+ 0 - 2
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java

@@ -1694,7 +1694,6 @@ public class ConfigureActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     // Creating starting repo
     m_helper.getOrCreateRepositoryVersion(HDP_220_STACK, HDP_2_2_0_0);
@@ -1742,7 +1741,6 @@ public class ConfigureActionTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;

+ 0 - 8
ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java

@@ -191,7 +191,6 @@ public class UpgradeActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     // Create the starting repo version
     m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -229,7 +228,6 @@ public class UpgradeActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     // Create the starting repo version
     m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
@@ -295,7 +293,6 @@ public class UpgradeActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     // without this, HostEntity will not have a relation to ClusterEntity
     clusters.mapHostsToCluster(Collections.singleton(hostName), clusterName);
@@ -376,7 +373,6 @@ public class UpgradeActionTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
 
     clusters.mapHostToCluster(hostName, clusterName);
 
@@ -1011,8 +1007,6 @@ public class UpgradeActionTest {
     sch.setState(State.INSTALLED);
     sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
     sch.setStackVersion(cluster.getCurrentStackVersion());
-
-    sch.persist();
     return sch;
   }
 
@@ -1024,7 +1018,6 @@ public class UpgradeActionTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -1039,7 +1032,6 @@ public class UpgradeActionTest {
       serviceComponent = serviceComponentFactory.createNew(service, componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;

+ 3 - 7
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java

@@ -21,16 +21,11 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 
-import com.google.inject.persist.jpa.AmbariJpaPersistModule;
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
@@ -45,6 +40,8 @@ import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.Transactional;
 
+import junit.framework.Assert;
+
 public class ConfigGroupTest {
 
   private Clusters clusters;
@@ -75,8 +72,6 @@ public class ConfigGroupTest {
     clusters.addHost("h2");
     Assert.assertNotNull(clusters.getHost("h1"));
     Assert.assertNotNull(clusters.getHost("h2"));
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
   }
 
   @After
@@ -109,6 +104,7 @@ public class ConfigGroupTest {
       "HDFS", "New HDFS configs for h1", configs, hosts);
 
     configGroup.persist();
+    cluster.addConfigGroup(configGroup);
     return configGroup;
   }
 

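The one functional addition in ConfigGroupTest is the cluster.addConfigGroup() call: persisting a group no longer registers it with its cluster, so the test performs both steps explicitly. In sketch form, with configGroup and cluster as in the test:

    // persist() writes the group; registration with the owning cluster is
    // now a separate, explicit step so later lookups through the cluster
    // can see it
    configGroup.persist();
    cluster.addConfigGroup(configGroup);
    return configGroup;
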
+ 1 - 11
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java

@@ -23,7 +23,6 @@ import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
-import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -32,11 +31,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
 
 import javax.persistence.EntityManager;
 
@@ -57,12 +52,10 @@ import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.utils.SynchronousThreadPoolExecutor;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -120,9 +113,6 @@ public class ConfigHelperTest {
       Assert.assertNotNull(clusters.getHost("h1"));
       Assert.assertNotNull(clusters.getHost("h2"));
       Assert.assertNotNull(clusters.getHost("h3"));
-      clusters.getHost("h1").persist();
-      clusters.getHost("h2").persist();
-      clusters.getHost("h3").persist();
 
       // core-site
       ConfigurationRequest cr = new ConfigurationRequest();
@@ -989,7 +979,7 @@ public class ConfigHelperTest {
           bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class));
           bind(AmbariMetaInfo.class).toInstance(mockMetaInfo);
           bind(RequestFactory.class).toInstance(createNiceMock(RequestFactory.class));
-          bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
+          bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
           bind(ClusterController.class).toInstance(clusterController);
           bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
           bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));

+ 4 - 8
ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java

@@ -21,8 +21,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.RequestScheduleResponse;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -46,6 +44,8 @@ import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.Transactional;
 
+import junit.framework.Assert;
+
 public class RequestExecutionTest {
   private Injector injector;
   private Clusters clusters;
@@ -74,9 +74,6 @@ public class RequestExecutionTest {
     Assert.assertNotNull(clusters.getHost("h1"));
     Assert.assertNotNull(clusters.getHost("h2"));
     Assert.assertNotNull(clusters.getHost("h3"));
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
   }
 
   @After
@@ -120,7 +117,7 @@ public class RequestExecutionTest {
     requestExecution.setDescription("Test Schedule");
 
     requestExecution.persist();
-
+    cluster.addRequestExecution(requestExecution);
     return requestExecution;
   }
 
@@ -270,8 +267,7 @@ public class RequestExecutionTest {
     Assert.assertNotNull(requestExecution);
 
     Long id = requestExecution.getId();
-
-    requestExecution.delete();
+    cluster.deleteRequestExecution(id);
 
     Assert.assertNull(requestScheduleDAO.findById(id));
     Assert.assertNull(cluster.getAllRequestExecutions().get(id));

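RequestExecutionTest makes the same ownership shift for request schedules: registration happens through the cluster after persist(), and deletion goes through the cluster by id instead of requestExecution.delete(). A condensed sketch of the new lifecycle, using only the calls and assertions visible in the hunks above:

    // create + register with the owning cluster
    requestExecution.persist();
    cluster.addRequestExecution(requestExecution);

    // delete by id through the cluster (replaces requestExecution.delete())
    Long id = requestExecution.getId();
    cluster.deleteRequestExecution(id);
    Assert.assertNull(requestScheduleDAO.findById(id));
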
+ 7 - 40
ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java

@@ -103,7 +103,6 @@ public class ServiceComponentTest {
 
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
-    s.persist();
     service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
   }
@@ -119,7 +118,6 @@ public class ServiceComponentTest {
     ServiceComponent component = serviceComponentFactory.createNew(service,
         componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -142,7 +140,6 @@ public class ServiceComponentTest {
     ServiceComponent component = serviceComponentFactory.createNew(service,
         componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -186,12 +183,10 @@ public class ServiceComponentTest {
     h.setIPv6(hostname + "ipv6");
 
     Map<String, String> hostAttributes = new HashMap<String, String>();
-	hostAttributes.put("os_family", "redhat");
-	hostAttributes.put("os_release_version", "6.3");
-	h.setHostAttributes(hostAttributes);
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6.3");
+    h.setHostAttributes(hostAttributes);
 
-
-    h.persist();
     clusters.mapHostToCluster(hostname, clusterName);
   }
 
@@ -201,7 +196,6 @@ public class ServiceComponentTest {
     ServiceComponent component = serviceComponentFactory.createNew(service,
         componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -222,43 +216,23 @@ public class ServiceComponentTest {
     HostEntity hostEntity1 = hostDAO.findByName("h1");
     assertNotNull(hostEntity1);
 
-    ServiceComponentHost sch1 =
-        serviceComponentHostFactory.createNew(sc, "h1");
-    ServiceComponentHost sch2 =
-        serviceComponentHostFactory.createNew(sc, "h2");
-    ServiceComponentHost failSch =
-        serviceComponentHostFactory.createNew(sc, "h2");
-
-    Map<String, ServiceComponentHost> compHosts =
-        new HashMap<String, ServiceComponentHost>();
-    compHosts.put("h1", sch1);
-    compHosts.put("h2", sch2);
-    compHosts.put("h3", failSch);
+    ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
+    ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
 
     try {
-      sc.addServiceComponentHosts(compHosts);
+      sc.addServiceComponentHost("h2");
       fail("Expected error for dups");
     } catch (Exception e) {
       // Expected
     }
-    Assert.assertTrue(sc.getServiceComponentHosts().isEmpty());
-
-    compHosts.remove("h3");
-    sc.addServiceComponentHosts(compHosts);
 
     Assert.assertEquals(2, sc.getServiceComponentHosts().size());
 
-    sch1.persist();
-    sch2.persist();
-
     ServiceComponentHost schCheck = sc.getServiceComponentHost("h2");
     Assert.assertNotNull(schCheck);
     Assert.assertEquals("h2", schCheck.getHostName());
 
-    ServiceComponentHost sch3 =
-        serviceComponentHostFactory.createNew(sc, "h3");
-    sc.addServiceComponentHost(sch3);
-    sch3.persist();
+    sc.addServiceComponentHost("h3");
     Assert.assertNotNull(sc.getServiceComponentHost("h3"));
 
     sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
@@ -302,7 +276,6 @@ public class ServiceComponentTest {
     ServiceComponent component = serviceComponentFactory.createNew(service,
         componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     addHostToCluster("h1", service.getCluster().getClusterName());
     ServiceComponentHost sch =
@@ -314,7 +287,6 @@ public class ServiceComponentTest {
     compHosts.put("h1", sch);
     component.addServiceComponentHosts(compHosts);
     Assert.assertEquals(1, component.getServiceComponentHosts().size());
-    sch.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -379,7 +351,6 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -430,7 +401,6 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -498,7 +468,6 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -565,7 +534,6 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
@@ -616,7 +584,6 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
-    component.persist();
 
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);

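The largest rewrite is in ServiceComponentTest: in the add-hosts test, instead of creating ServiceComponentHost objects through the factory, collecting them into a map, calling addServiceComponentHosts(), and persisting each one, the test now adds hosts by name and gets duplicate detection per call. The updated idiom, condensed from the hunk above:

    // add-by-name creates and registers the SCH in one step; no sch.persist()
    ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
    ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");

    try {
      sc.addServiceComponentHost("h2"); // duplicate host now fails fast
      fail("Expected error for dups");
    } catch (Exception e) {
      // expected: the component already has an SCH on h2
    }

    Assert.assertEquals(2, sc.getServiceComponentHosts().size());
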
+ 2 - 13
ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java

@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.state;
 
-import junit.framework.Assert;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -40,6 +39,8 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 
+import junit.framework.Assert;
+
 public class ServiceTest {
 
   private Clusters clusters;
@@ -119,7 +120,6 @@ public class ServiceTest {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
-    s.persist();
 
     Service service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
@@ -141,7 +141,6 @@ public class ServiceTest {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
-    s.persist();
 
     Service service = cluster.getService(serviceName);
 
@@ -185,17 +184,11 @@ public class ServiceTest {
 
     s.addServiceComponent(sc3);
 
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-
     ServiceComponent sc4 = s.addServiceComponent("HDFS_CLIENT");
     Assert.assertNotNull(s.getServiceComponent(sc4.getName()));
     Assert.assertEquals(State.INIT,
         s.getServiceComponent("HDFS_CLIENT").getDesiredState());
     Assert.assertTrue(sc4.isClientComponent());
-    sc4.persist();
-
     Assert.assertEquals(4, s.getServiceComponents().size());
 
     Assert.assertNotNull(s.getServiceComponent(sc3.getName()));
@@ -267,7 +260,6 @@ public class ServiceTest {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
-    s.persist();
 
     Service service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
@@ -291,7 +283,6 @@ public class ServiceTest {
     String serviceName = "HDFS";
     Service s = serviceFactory.createNew(cluster, serviceName);
     cluster.addService(s);
-    s.persist();
 
     Service service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
@@ -339,8 +330,6 @@ public class ServiceTest {
     hostAttributes.put("os_release_version", "6.3");
     h.setHostAttributes(hostAttributes);
 
-
-    h.persist();
     clusters.mapHostToCluster(hostname, clusterName);
   }
 }

+ 0 - 8
ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java

@@ -508,8 +508,6 @@ public class UpgradeHelperTest {
     Clusters clusters = injector.getInstance(Clusters.class);
     Host h4 = clusters.getHost("h4");
     h4.setState(HostState.HEARTBEAT_LOST);
-    h4.persist();
-
 
     List<ServiceComponentHost> schs = cluster.getServiceComponentHosts("h4");
     assertEquals(1, schs.size());
@@ -1224,7 +1222,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 
@@ -1441,7 +1438,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 
@@ -1521,7 +1517,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 
@@ -1587,7 +1582,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 
@@ -1654,7 +1648,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 
@@ -1773,7 +1766,6 @@ public class UpgradeHelperTest {
 
       host.setHostAttributes(hostAttributes);
 
-      host.persist();
       clusters.mapHostToCluster(hostName, clusterName);
     }
 

+ 3 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java

@@ -19,8 +19,6 @@ package org.apache.ambari.server.state.alerts;
 
 import java.util.UUID;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.events.AlertDefinitionChangedEvent;
 import org.apache.ambari.server.events.AlertDefinitionDeleteEvent;
 import org.apache.ambari.server.events.AmbariEvent;
@@ -51,12 +49,14 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.gson.Gson;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
-import org.junit.experimental.categories.Category;
+
+import junit.framework.Assert;
 
 /**
  * Tests that {@link AmbariEvent} instances are fired correctly and that alert
@@ -306,7 +306,6 @@ public class AlertEventPublisherTest {
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = serviceFactory.createNew(cluster, serviceName);
-    service.persist();
     service = cluster.getService(serviceName);
 
     Assert.assertNotNull(service);

+ 1 - 2
ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java

@@ -41,6 +41,7 @@ import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import com.google.common.eventbus.EventBus;
 import com.google.inject.Binder;
@@ -51,7 +52,6 @@ import com.google.inject.persist.PersistService;
 import com.google.inject.util.Modules;
 
 import junit.framework.Assert;
-import org.junit.experimental.categories.Category;
 
 /**
  * Tests that {@link InitialAlertEvent} instances are fired correctly.
@@ -177,7 +177,6 @@ public class InitialAlertEventTest {
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);
-    service.persist();
     service = m_cluster.getService(serviceName);
 
     Assert.assertNotNull(service);

+ 15 - 26
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java

@@ -18,6 +18,21 @@
 
 package org.apache.ambari.server.state.cluster;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -34,22 +49,6 @@ import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.testing.DeadlockWarningThread;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.ServiceNotFoundException;
-import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Assert;
@@ -63,8 +62,6 @@ import com.google.inject.Injector;
 import com.google.inject.Module;
 import com.google.inject.persist.PersistService;
 import com.google.inject.util.Modules;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Tests AMBARI-9368 and AMBARI-9761 which produced a deadlock during read and
@@ -144,7 +141,6 @@ public class ClusterDeadlockTest {
 
       clusters.addHost(hostName);
       setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-      clusters.getHost(hostName).persist();
       clusters.mapHostToCluster(hostName, "c1");
     }
 
@@ -256,10 +252,6 @@ public class ClusterDeadlockTest {
           "DATANODE", hostName));
     }
 
-    // !!! needed to populate some maps; without this, the cluster report
-    // won't do anything and this test will be worthless
-    ((ClusterImpl) cluster).loadServiceHostComponents();
-
     List<Thread> threads = new ArrayList<Thread>();
     for (int i = 0; i < NUMBER_OF_THREADS; i++) {
       ClusterReaderThread clusterReaderThread = new ClusterReaderThread();
@@ -590,7 +582,6 @@ public class ClusterDeadlockTest {
     sch.setDesiredStackVersion(stackId);
     sch.setStackVersion(stackId);
 
-    sch.persist();
     return sch;
   }
 
@@ -602,7 +593,6 @@ public class ClusterDeadlockTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -618,7 +608,6 @@ public class ClusterDeadlockTest {
           componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;

+ 15 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java

@@ -39,7 +39,9 @@ import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.scheduler.ExecutionScheduler;
@@ -75,6 +77,7 @@ import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.Module;
+import com.google.inject.assistedinject.FactoryModuleBuilder;
 
 import junit.framework.Assert;
 
@@ -99,11 +102,10 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
 
     expectClusterEntityMocks();
 
-    AmbariEventPublisher eventPublisher = createNiceMock(AmbariEventPublisher.class);
-
     replayAll();
 
-    m_cluster = new ClusterImpl(m_clusterEntity, m_injector, eventPublisher);
+    ClusterFactory clusterFactory = m_injector.getInstance(ClusterFactory.class);
+    m_cluster = clusterFactory.create(m_clusterEntity);
 
     verifyAll();
   }
@@ -227,6 +229,12 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
         new ArrayList<ClusterServiceEntity>()).anyTimes();
     EasyMock.expect(m_clusterEntity.getClusterConfigEntities()).andReturn(
         new ArrayList<ClusterConfigEntity>()).anyTimes();
+
+    EasyMock.expect(m_clusterEntity.getConfigGroupEntities()).andReturn(
+        new ArrayList<ConfigGroupEntity>()).anyTimes();
+
+    EasyMock.expect(m_clusterEntity.getRequestScheduleEntities()).andReturn(
+        new ArrayList<RequestScheduleEntity>()).anyTimes();
   }
 
   /**
@@ -262,13 +270,16 @@ public class ClusterEffectiveVersionTest extends EasyMockSupport {
       binder.bind(PasswordEncoder.class).toInstance(EasyMock.createNiceMock(PasswordEncoder.class));
       binder.bind(KerberosHelper.class).toInstance(EasyMock.createNiceMock(KerberosHelper.class));
       binder.bind(Users.class).toInstance(EasyMock.createNiceMock(Users.class));
+      binder.bind(AmbariEventPublisher.class).toInstance(createNiceMock(AmbariEventPublisher.class));
 
+      binder.install(new FactoryModuleBuilder().implement(
+          Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
 
       try {
         AmbariMetaInfo ambariMetaInfo = EasyMock.createNiceMock(AmbariMetaInfo.class);
         EasyMock.expect(
             ambariMetaInfo.getServices(EasyMock.anyString(), EasyMock.anyString())).andReturn(
-                new HashMap<String, ServiceInfo>());
+                new HashMap<String, ServiceInfo>()).anyTimes();
 
         EasyMock.replay(ambariMetaInfo);
 

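The ClusterEffectiveVersionTest hunks above swap a hand-built ClusterImpl for a Guice assisted-injection factory: the test installs a ClusterFactory binding via FactoryModuleBuilder and asks the factory for the instance. A compact sketch of that wiring, assuming ClusterImpl marks its ClusterEntity constructor parameter with @Assisted; the factory method signature is inferred from clusterFactory.create(m_clusterEntity) above, and the real ClusterFactory may declare additional methods:

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Module;
    import com.google.inject.assistedinject.FactoryModuleBuilder;

    // Factory interface: Guice generates the implementation.
    public interface ClusterFactory {
      Cluster create(ClusterEntity clusterEntity);
    }

    Module module = new AbstractModule() {
      @Override
      protected void configure() {
        install(new FactoryModuleBuilder()
            .implement(Cluster.class, ClusterImpl.class)
            .build(ClusterFactory.class));
      }
    };

    Cluster cluster = Guice.createInjector(module)
        .getInstance(ClusterFactory.class)
        .create(clusterEntity);

This is also why the test no longer mocks AmbariEventPublisher locally: the binding moved into the module so the generated factory can satisfy ClusterImpl's injected constructor arguments.
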
+ 24 - 42
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java

@@ -18,11 +18,20 @@
 
 package org.apache.ambari.server.state.cluster;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.controller.AmbariSessionManager;
@@ -35,26 +44,13 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.verify;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
 
 public class ClusterImplTest {
 
@@ -67,7 +63,7 @@ public class ClusterImplTest {
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
   }
-  
+
   @Test
   public void testAddSessionAttributes() throws Exception {
     Map<String, Object> attributes = new HashMap<String, Object>();
@@ -211,40 +207,30 @@ public class ClusterImplTest {
 
     Host host1 = clusters.getHost(hostName1);
     host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host1.persist();
 
     Host host2 = clusters.getHost(hostName2);
     host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host2.persist();
 
     clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
 
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-    nameNode.addServiceComponentHost(hostName1).persist();
+    nameNode.addServiceComponentHost(hostName1);
 
     ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
-    dataNode.persist();
-    dataNode.addServiceComponentHost(hostName1).persist();
-    dataNode.addServiceComponentHost(hostName2).persist();
+    dataNode.addServiceComponentHost(hostName1);
+    dataNode.addServiceComponentHost(hostName2);
 
     ServiceComponent hdfsClient = hdfs.addServiceComponent("HDFS_CLIENT");
-    hdfsClient.persist();
-    hdfsClient.addServiceComponentHost(hostName1).persist();
-    hdfsClient.addServiceComponentHost(hostName2).persist();
+    hdfsClient.addServiceComponentHost(hostName1);
+    hdfsClient.addServiceComponentHost(hostName2);
 
     Service tez = cluster.addService(serviceToDelete);
-    tez.persist();
 
     ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
-    tezClient.persist();
     ServiceComponentHost tezClientHost1 =  tezClient.addServiceComponentHost(hostName1);
-    tezClientHost1.persist();
     ServiceComponentHost tezClientHost2 = tezClient.addServiceComponentHost(hostName2);
-    tezClientHost2.persist();
 
     // When
     cluster.deleteService(serviceToDelete);
@@ -279,11 +265,9 @@ public class ClusterImplTest {
 
     Host host1 = clusters.getHost(hostName1);
     host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host1.persist();
 
     Host host2 = clusters.getHost(hostName2);
     host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host2.persist();
 
     clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
 
@@ -320,11 +304,9 @@ public class ClusterImplTest {
 
     Host host1 = clusters.getHost(hostName1);
     host1.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host1.persist();
 
     Host host2 = clusters.getHost(hostName2);
     host2.setHostAttributes(ImmutableMap.of("os_family", "centos", "os_release_version", "6.0"));
-    host2.persist();
 
     clusters.mapHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
 

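With eager persistence, the testDeleteService setup above becomes a straight-line build: nothing has to be saved before deleteService runs. Condensed from the + lines of that hunk (serviceToDelete is the test's local variable):

    Service tez = cluster.addService(serviceToDelete);
    ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
    ServiceComponentHost tezClientHost1 = tezClient.addServiceComponentHost(hostName1);
    ServiceComponentHost tezClientHost2 = tezClient.addServiceComponentHost(hostName2);

    // Drops the service, its component, and both host components in one call.
    cluster.deleteService(serviceToDelete);
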
+ 9 - 113
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java

@@ -79,11 +79,8 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.HostStateEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.state.AgentVersion;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -232,41 +229,24 @@ public class ClusterTest {
 
     String clusterName = "c1";
 
-    ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
-    if (resourceTypeEntity == null) {
-      resourceTypeEntity = new ResourceTypeEntity();
-      resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-      resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-    }
-    ResourceEntity resourceEntity = new ResourceEntity();
-    resourceEntity.setResourceType(resourceTypeEntity);
-
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName(clusterName);
-    clusterEntity.setResource(resourceEntity);
-    clusterEntity.setDesiredStack(stackEntity);
-    clusterDAO.create(clusterEntity);
+    clusters.addCluster(clusterName, stackId);
 
     Map<String, String> hostAttributes = new HashMap<String, String>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "5.9");
 
-    List<HostEntity> hostEntities = new ArrayList<HostEntity>();
     Set<String> hostNames = new HashSet<String>() {{ add("h1"); add("h2"); }};
     for (String hostName : hostNames) {
-      HostEntity hostEntity = new HostEntity();
-      hostEntity.setHostName(hostName);
+      clusters.addHost(hostName);
+
+      HostEntity hostEntity = hostDAO.findByName(hostName);
       hostEntity.setIpv4("ipv4");
       hostEntity.setIpv6("ipv6");
       hostEntity.setHostAttributes(gson.toJson(hostAttributes));
-      hostEntity.setClusterEntities(Arrays.asList(clusterEntity));
-      hostEntities.add(hostEntity);
-      hostDAO.create(hostEntity);
+      hostDAO.merge(hostEntity);
     }
 
-    clusterEntity.setHostEntities(hostEntities);
-    clusterDAO.merge(clusterEntity);
+    clusters.mapHostsToCluster(hostNames, clusterName);
     c1 = clusters.getCluster(clusterName);
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
@@ -403,9 +383,6 @@ public class ClusterTest {
     cluster.addService(s1);
     cluster.addService(s2);
     cluster.addService(s3);
-    s1.persist();
-    s2.persist();
-    s3.persist();
 
     // Add HDFS components
     ServiceComponent sc1CompA = serviceComponentFactory.createNew(s1, "NAMENODE");
@@ -414,25 +391,18 @@ public class ClusterTest {
     s1.addServiceComponent(sc1CompA);
     s1.addServiceComponent(sc1CompB);
     s1.addServiceComponent(sc1CompC);
-    sc1CompA.persist();
-    sc1CompB.persist();
-    sc1CompC.persist();
 
     // Add ZK
     ServiceComponent sc2CompA = serviceComponentFactory.createNew(s2, "ZOOKEEPER_SERVER");
     ServiceComponent sc2CompB = serviceComponentFactory.createNew(s2, "ZOOKEEPER_CLIENT");
     s2.addServiceComponent(sc2CompA);
     s2.addServiceComponent(sc2CompB);
-    sc2CompA.persist();
-    sc2CompB.persist();
 
     // Add Ganglia
     ServiceComponent sc3CompA = serviceComponentFactory.createNew(s3, "GANGLIA_SERVER");
     ServiceComponent sc3CompB = serviceComponentFactory.createNew(s3, "GANGLIA_MONITOR");
     s3.addServiceComponent(sc3CompA);
     s3.addServiceComponent(sc3CompB);
-    sc3CompA.persist();
-    sc3CompB.persist();
 
     // Host 1 will have all components
     ServiceComponentHost schHost1Serv1CompA = serviceComponentHostFactory.createNew(sc1CompA, "h-1");
@@ -449,26 +419,16 @@ public class ClusterTest {
     sc2CompB.addServiceComponentHost(schHost1Serv2CompB);
     sc3CompA.addServiceComponentHost(schHost1Serv3CompA);
     sc3CompB.addServiceComponentHost(schHost1Serv3CompB);
-    schHost1Serv1CompA.persist();
-    schHost1Serv1CompB.persist();
-    schHost1Serv1CompC.persist();
-    schHost1Serv2CompA.persist();
-    schHost1Serv2CompB.persist();
-    schHost1Serv3CompA.persist();
-    schHost1Serv3CompB.persist();
 
     // Host 2 will have ZK_CLIENT and GANGLIA_MONITOR
     ServiceComponentHost schHost2Serv2CompB = serviceComponentHostFactory.createNew(sc2CompB, "h-2");
     ServiceComponentHost schHost2Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-2");
     sc2CompB.addServiceComponentHost(schHost2Serv2CompB);
     sc3CompB.addServiceComponentHost(schHost2Serv3CompB);
-    schHost2Serv2CompB.persist();
-    schHost2Serv3CompB.persist();
 
     // Host 3 will have GANGLIA_MONITOR
     ServiceComponentHost schHost3Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-3");
     sc3CompB.addServiceComponentHost(schHost3Serv3CompB);
-    schHost3Serv3CompB.persist();
 
     // Verify count of components
     List<ServiceComponentHost> scHost1 = cluster.getServiceComponentHosts("h-1");
@@ -528,7 +488,6 @@ public class ClusterTest {
     host.setIPv4("ipv4");
     host.setIPv6("ipv6");
     host.setHostAttributes(hostAttributes);
-    host.persist();
   }
 
   /**
@@ -687,9 +646,6 @@ public class ClusterTest {
     Service s1 = serviceFactory.createNew(c1, "HDFS");
     Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
 
-    s1.persist();
-    s2.persist();
-
     Service s = c1.getService("HDFS");
     Assert.assertNotNull(s);
     Assert.assertEquals("HDFS", s.getName());
@@ -717,14 +673,12 @@ public class ClusterTest {
 
     Service s = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s);
-    s.persist();
     ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE");
     s.addServiceComponent(sc);
-    sc.persist();
+
     ServiceComponentHost sch =
         serviceComponentHostFactory.createNew(sc, "h1");
     sc.addServiceComponentHost(sch);
-    sch.persist();
 
     List<ServiceComponentHost> scHosts = c1.getServiceComponentHosts("h1");
     Assert.assertEquals(1, scHosts.size());
@@ -737,13 +691,10 @@ public class ClusterTest {
         iterator.next();
         Service s1 = serviceFactory.createNew(c1, "PIG");
         c1.addService(s1);
-        s1.persist();
         ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG");
         s1.addServiceComponent(sc1);
-        sc1.persist();
         ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
         sc1.addServiceComponentHost(sch1);
-        sch1.persist();
       }
     } catch (ConcurrentModificationException e ) {
       Assert.assertTrue("Failed to work concurrently with sch", false);
@@ -759,24 +710,18 @@ public class ClusterTest {
 
     Service s = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s);
-    s.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
     s.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
     s.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     List<ServiceComponentHost> scHosts;
 
@@ -793,24 +738,18 @@ public class ClusterTest {
 
     Service s = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s);
-    s.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
     s.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
     s.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     List<ServiceComponentHost> scHosts;
 
@@ -833,24 +772,18 @@ public class ClusterTest {
 
     Service s = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s);
-    s.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
     s.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE");
     s.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     Map<String, Set<String>> componentHostMap;
 
@@ -871,35 +804,26 @@ public class ClusterTest {
 
     Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
     c1.addService(sfHDFS);
-    sfHDFS.persist();
 
     Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
     c1.addService(sfMR);
-    sfMR.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
     sfHDFS.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
     sfHDFS.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
     sfMR.addServiceComponent(scJT);
-    scJT.persist();
     ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
     scJT.addServiceComponentHost(schJTH1);
-    schJTH1.persist();
 
     Map<String, Set<String>> componentHostMap;
 
@@ -936,35 +860,26 @@ public class ClusterTest {
 
     Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
     c1.addService(sfHDFS);
-    sfHDFS.persist();
 
     Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
     c1.addService(sfMR);
-    sfMR.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
     sfHDFS.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
     sfHDFS.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
     sfMR.addServiceComponent(scJT);
-    scJT.persist();
     ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
     scJT.addServiceComponentHost(schJTH1);
-    schJTH1.persist();
 
     Map<String, Set<String>> componentHostMap;
 
@@ -1002,35 +917,26 @@ public class ClusterTest {
 
     Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
     c1.addService(sfHDFS);
-    sfHDFS.persist();
 
     Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
     c1.addService(sfMR);
-    sfMR.persist();
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
     sfHDFS.addServiceComponent(scNN);
-    scNN.persist();
     ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1");
     scNN.addServiceComponentHost(schNNH1);
-    schNNH1.persist();
 
     ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE");
     sfHDFS.addServiceComponent(scDN);
-    scDN.persist();
     ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1");
     scDN.addServiceComponentHost(scDNH1);
-    scDNH1.persist();
     ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2");
     scDN.addServiceComponentHost(scDNH2);
-    scDNH2.persist();
 
     ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER");
     sfMR.addServiceComponent(scJT);
-    scJT.persist();
     ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1");
     scJT.addServiceComponentHost(schJTH1);
-    schJTH1.persist();
 
     Map<String, Set<String>> componentHostMap;
 
@@ -1180,7 +1086,6 @@ public class ClusterTest {
     host.setState(HostState.HEALTHY);
     host.setHealthStatus(new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, ""));
     host.setStatus(host.getHealthStatus().getHealthStatus().name());
-    host.persist();
     c1.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     clusters.mapHostToCluster("h3", "c1");
 
@@ -1207,13 +1112,10 @@ public class ClusterTest {
   public void testDeleteService() throws Exception {
     createDefaultCluster();
 
-    c1.addService("MAPREDUCE").persist();
+    c1.addService("MAPREDUCE");
 
     Service hdfs = c1.addService("HDFS");
-    hdfs.persist();
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-
 
     assertEquals(2, c1.getServices().size());
     assertEquals(2, injector.getProvider(EntityManager.class).get().
@@ -1230,7 +1132,7 @@ public class ClusterTest {
   public void testDeleteServiceWithConfigHistory() throws Exception {
     createDefaultCluster();
 
-    c1.addService("HDFS").persist();
+    c1.addService("HDFS");
 
     Config config1 = configFactory.createNew(c1, "hdfs-site",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
@@ -2159,9 +2061,6 @@ public class ClusterTest {
     sc2CompA.addServiceComponentHost(schHost4Serv2CompA);
     sc2CompB.addServiceComponentHost(schHost4Serv2CompB);
     sc3CompB.addServiceComponentHost(schHost4Serv3CompB);
-    schHost4Serv2CompA.persist();
-    schHost4Serv2CompB.persist();
-    schHost4Serv3CompB.persist();
 
     simulateStackVersionListener(stackId, v1, cluster, hostComponentStateDAO.findByHost("h-4"));
 
@@ -2193,7 +2092,6 @@ public class ClusterTest {
     clusters.mapHostToCluster("h-5", clusterName);
     ServiceComponentHost schHost5Serv3CompB = serviceComponentHostFactory.createNew(sc3CompB, "h-5");
     sc3CompB.addServiceComponentHost(schHost5Serv3CompB);
-    schHost5Serv3CompB.persist();
 
     // Host 5 will be in OUT_OF_SYNC, so redistribute bits to it so that it reaches a state of INSTALLED
     HostVersionEntity h5Version2 = hostVersionDAO.findByClusterStackVersionAndHost(clusterName, stackId, v2, "h-5");
@@ -2334,7 +2232,6 @@ public class ClusterTest {
       hostAttributes.put("os_family", "redhat");
       hostAttributes.put("os_release_version", "5.9");
       h.setHostAttributes(hostAttributes);
-      h.persist();
     }
 
     String v1 = "2.0.5-1";
@@ -2405,7 +2302,6 @@ public class ClusterTest {
       hostAttributes.put("os_family", "redhat");
       hostAttributes.put("os_release_version", "5.9");
       h.setHostAttributes(hostAttributes);
-      h.persist();
     }
 
     String v1 = "2.0.5-1";

+ 3 - 9
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java

@@ -25,9 +25,6 @@ import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.google.inject.Provider;
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
@@ -57,9 +54,12 @@ import com.google.inject.Guice;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.Module;
+import com.google.inject.Provider;
 import com.google.inject.persist.PersistService;
 import com.google.inject.util.Modules;
 
+import junit.framework.Assert;
+
 /**
  * Tests AMBARI-9738 which produced a deadlock during read and writes between
  * {@link ClustersImpl} and {@link ClusterImpl}.
@@ -294,7 +294,6 @@ public class ClustersDeadlockTest {
           String hostName = "c64-" + hostNameCounter.getAndIncrement();
           clusters.addHost(hostName);
           setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-          clusters.getHost(hostName).persist();
           clusters.mapHostToCluster(hostName, CLUSTER_NAME);
 
           Thread.sleep(10);
@@ -322,7 +321,6 @@ public class ClustersDeadlockTest {
           String hostName = "c64-" + hostNameCounter.getAndIncrement();
           clusters.addHost(hostName);
           setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-          clusters.getHost(hostName).persist();
           clusters.mapHostToCluster(hostName, CLUSTER_NAME);
 
           // create DATANODE on this host so that we end up exercising the
@@ -357,7 +355,6 @@ public class ClustersDeadlockTest {
 
           clusters.addHost(hostName);
           setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-          clusters.getHost(hostName).persist();
           clusters.mapHostToCluster(hostName, CLUSTER_NAME);
         }
 
@@ -388,7 +385,6 @@ public class ClustersDeadlockTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -404,7 +400,6 @@ public class ClustersDeadlockTest {
           componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;
@@ -425,7 +420,6 @@ public class ClustersDeadlockTest {
     sch.setDesiredStackVersion(stackId);
     sch.setStackVersion(stackId);
 
-    sch.persist();
     return sch;
   }
 

+ 0 - 17
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java

@@ -296,9 +296,6 @@ public class ClustersTest {
     setOsFamily(clusters.getHost(h1), "redhat", "6.4");
     setOsFamily(clusters.getHost(h2), "redhat", "5.9");
     setOsFamily(clusters.getHost(h3), "redhat", "6.4");
-    clusters.getHost(h1).persist();
-    clusters.getHost(h2).persist();
-    clusters.getHost(h3).persist();
 
     try {
         clusters.getClustersForHost(h4);
@@ -381,9 +378,6 @@ public class ClustersTest {
     setOsFamily(clusters.getHost(h1), "redhat", "6.4");
     setOsFamily(clusters.getHost(h2), "redhat", "5.9");
     setOsFamily(clusters.getHost(h3), "redhat", "6.4");
-    clusters.getHost(h1).persist();
-    clusters.getHost(h2).persist();
-    clusters.getHost(h3).persist();
     clusters.mapHostToCluster(h1, c1);
     clusters.mapHostToCluster(h2, c1);
 
@@ -435,8 +429,6 @@ public class ClustersTest {
     Host host2 = clusters.getHost(h2);
     setOsFamily(clusters.getHost(h1), "centos", "5.9");
     setOsFamily(clusters.getHost(h2), "centos", "5.9");
-    host1.persist();
-    host2.persist();
 
     clusters.mapHostsToCluster(new HashSet<String>() {
       {
@@ -446,31 +438,23 @@ public class ClustersTest {
 
     // host config override
     host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);
-    host1.persist();
 
     Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
 
     Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
 
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
     ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
-    dataNode.persist();
 
     ServiceComponent serviceCheckNode = hdfs.addServiceComponent("HDFS_CLIENT");
-    serviceCheckNode.persist();
 
     ServiceComponentHost nameNodeHost = nameNode.addServiceComponentHost(h1);
-    nameNodeHost.persist();
     HostEntity nameNodeHostEntity = hostDAO.findByName(nameNodeHost.getHostName());
     Assert.assertNotNull(nameNodeHostEntity);
 
     ServiceComponentHost dataNodeHost = dataNode.addServiceComponentHost(h2);
-    dataNodeHost.persist();
 
     ServiceComponentHost serviceCheckNodeHost = serviceCheckNode.addServiceComponentHost(h2);
-    serviceCheckNodeHost.persist();
     serviceCheckNodeHost.setState(State.UNKNOWN);
 
     HostComponentDesiredStateEntityPK hkdspk = new HostComponentDesiredStateEntityPK();
@@ -705,7 +689,6 @@ public class ClustersTest {
 
     Host host = clusters.getHost(hostName);
     setOsFamily(clusters.getHost(hostName), "centos", "5.9");
-    host.persist();
 
     Set<String> hostnames = new HashSet<>();
     hostnames.add(hostName);

+ 0 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java

@@ -115,7 +115,6 @@ public class ConcurrentServiceConfigVersionTest {
     String hostName = "c6401.ambari.apache.org";
     clusters.addHost(hostName);
     setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-    clusters.getHost(hostName).persist();
     clusters.mapHostToCluster(hostName, "c1");
 
     Service service = installService("HDFS");
@@ -213,7 +212,6 @@ public class ConcurrentServiceConfigVersionTest {
     sch.setDesiredStackVersion(stackId);
     sch.setStackVersion(stackId);
 
-    sch.persist();
     return sch;
   }
 
@@ -225,7 +223,6 @@ public class ConcurrentServiceConfigVersionTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -241,7 +238,6 @@ public class ConcurrentServiceConfigVersionTest {
           componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;

+ 0 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java

@@ -130,7 +130,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     String hostName = "c6401";
     clusters.addHost(hostName);
     setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
-    clusters.getHost(hostName).persist();
     clusters.mapHostToCluster(hostName, "c1");
 
     Service service = installService("HDFS");
@@ -242,7 +241,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     sch.setDesiredStackVersion(stackId);
     sch.setStackVersion(stackId);
 
-    sch.persist();
     return sch;
   }
 
@@ -254,7 +252,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     } catch (ServiceNotFoundException e) {
       service = serviceFactory.createNew(cluster, serviceName);
       cluster.addService(service);
-      service.persist();
     }
 
     return service;
@@ -270,7 +267,6 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
           componentName);
       service.addServiceComponent(serviceComponent);
       serviceComponent.setDesiredState(State.INSTALLED);
-      serviceComponent.persist();
     }
 
     return serviceComponent;

+ 18 - 32
ambari-server/src/test/java/org/apache/ambari/server/state/host/HostImplTest.java

@@ -17,25 +17,21 @@
  */
 package org.apache.ambari.server.state.host;
 
-import com.google.gson.Gson;
-import com.google.inject.Injector;
+import static org.easymock.EasyMock.expect;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostStateDAO;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.HostStateEntity;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.HostHealthStatus;
+import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
-import java.util.Map;
-
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.*;
+import com.google.gson.Gson;
 
-public class HostImplTest {
+public class HostImplTest extends EasyMockSupport {
 
   @Test
   public void testGetHostAttributes() throws Exception {
@@ -43,36 +39,30 @@ public class HostImplTest {
     HostEntity hostEntity = createNiceMock(HostEntity.class);
     HostStateEntity hostStateEntity = createNiceMock(HostStateEntity.class);
     HostDAO hostDAO  = createNiceMock(HostDAO.class);
-    Injector injector = createNiceMock(Injector.class);
     HostStateDAO hostStateDAO  = createNiceMock(HostStateDAO.class);
 
-
     Gson gson = new Gson();
 
-    expect(injector.getInstance(Gson.class)).andReturn(gson).anyTimes();
-    expect(injector.getInstance(HostDAO.class)).andReturn(hostDAO).anyTimes();
-    expect(injector.getInstance(HostStateDAO.class)).andReturn(hostStateDAO).anyTimes();
     expect(hostEntity.getHostAttributes()).andReturn("{\"foo\": \"aaa\", \"bar\":\"bbb\"}").anyTimes();
     expect(hostEntity.getHostId()).andReturn(1L).anyTimes();
     expect(hostEntity.getHostName()).andReturn("host1").anyTimes();
     expect(hostEntity.getHostStateEntity()).andReturn(hostStateEntity).anyTimes();
-    expect(hostDAO.findById(1L)).andReturn(hostEntity).once();
-    expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).once();
+    expect(hostDAO.findById(1L)).andReturn(hostEntity).atLeastOnce();
 
-    replay(hostEntity, hostStateEntity, injector, hostDAO);
-    HostImpl host = new HostImpl(hostEntity, false, injector);
+    replayAll();
+    HostImpl host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
 
     Map<String, String> hostAttributes = host.getHostAttributes();
     assertEquals("aaa", hostAttributes.get("foo"));
     assertEquals("bbb", hostAttributes.get("bar"));
 
-    host = new HostImpl(hostEntity, true, injector);
+    host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
 
     hostAttributes = host.getHostAttributes();
     assertEquals("aaa", hostAttributes.get("foo"));
     assertEquals("bbb", hostAttributes.get("bar"));
 
-    verify(hostEntity, hostStateEntity, injector, hostDAO);
+    verifyAll();
   }
 
   @Test
@@ -82,29 +72,25 @@ public class HostImplTest {
     HostStateEntity hostStateEntity = createNiceMock(HostStateEntity.class);
     HostDAO hostDAO  = createNiceMock(HostDAO.class);
     HostStateDAO hostStateDAO  = createNiceMock(HostStateDAO.class);
-    Injector injector = createNiceMock(Injector.class);
 
     Gson gson = new Gson();
 
-    expect(injector.getInstance(Gson.class)).andReturn(gson).anyTimes();
-    expect(injector.getInstance(HostDAO.class)).andReturn(hostDAO).anyTimes();
-    expect(injector.getInstance(HostStateDAO.class)).andReturn(hostStateDAO).anyTimes();
     expect(hostEntity.getHostAttributes()).andReturn("{\"foo\": \"aaa\", \"bar\":\"bbb\"}").anyTimes();
     expect(hostEntity.getHostName()).andReturn("host1").anyTimes();
     expect(hostEntity.getHostId()).andReturn(1L).anyTimes();
     expect(hostEntity.getHostStateEntity()).andReturn(hostStateEntity).anyTimes();
     expect(hostDAO.findById(1L)).andReturn(hostEntity).anyTimes();
-    expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).once();
+    expect(hostStateDAO.findByHostId(1L)).andReturn(hostStateEntity).atLeastOnce();
 
-    replay(hostEntity, hostStateEntity, injector, hostDAO);
-    HostImpl host = new HostImpl(hostEntity, false, injector);
+    replayAll();
+    HostImpl host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
 
     host.getHealthStatus();
 
-    host = new HostImpl(hostEntity, true, injector);
+    host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
 
     host.getHealthStatus();
 
-    verify(hostEntity, hostStateEntity, injector, hostDAO);
+    verifyAll();
   }
 }

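The HostImplTest rewrite above trades a mocked Injector for plain constructor injection: HostImpl now receives Gson and its DAOs directly, so the test no longer stubs injector.getInstance(...) for each collaborator. A sketch of the new construction path, using the four-argument constructor visible in the hunks (mocks as in the test):

    Gson gson = new Gson();
    HostEntity hostEntity = createNiceMock(HostEntity.class);
    HostDAO hostDAO = createNiceMock(HostDAO.class);
    HostStateDAO hostStateDAO = createNiceMock(HostStateDAO.class);

    // Dependencies are explicit; no service-locator lookups inside HostImpl.
    HostImpl host = new HostImpl(hostEntity, gson, hostDAO, hostStateDAO);
    Map<String, String> attrs = host.getHostAttributes();  // parsed from the entity's JSON

This also explains the dropped boolean argument from the old constructor: it appears to have tracked whether the entity was already persisted, a distinction the eager-persistence refactor removes.
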
+ 18 - 4
ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java

@@ -164,9 +164,11 @@ public class HostTest {
     HostRegistrationRequestEvent e =
         new HostRegistrationRequestEvent("foo", agentVersion, currentTime,
             info, agentEnv);
+
     if (!firstReg) {
-      Assert.assertTrue(host.isPersisted());
+      Assert.assertNotNull(host.getHostId());
     }
+
     host.handleEvent(e);
     Assert.assertEquals(currentTime, host.getLastRegistrationTime());
 
@@ -378,7 +380,6 @@ public class HostTest {
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
 
-    host.persist();
     c1.setDesiredStackVersion(stackId);
     clusters.mapHostToCluster("h1", "c1");
 
@@ -437,8 +438,6 @@ public class HostTest {
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
 
-    host.persist();
-
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
     c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
@@ -457,4 +456,19 @@ public class HostTest {
     Assert.assertNotNull(stateEntity.getMaintenanceState());
     Assert.assertEquals(MaintenanceState.ON, host.getMaintenanceState(c1.getClusterId()));
   }
+
+  @Test
+  public void testHostPersist() throws Exception {
+    clusters.addHost("foo");
+    Host host = clusters.getHost("foo");
+
+    String rackInfo = "rackInfo";
+    long lastRegistrationTime = System.currentTimeMillis();
+
+    host.setRackInfo(rackInfo);
+    host.setLastRegistrationTime(lastRegistrationTime);
+
+    Assert.assertEquals(rackInfo, host.getRackInfo());
+    Assert.assertEquals(lastRegistrationTime, host.getLastRegistrationTime());
+  }
 }

Some files were not shown because too many files changed in this diff