Browse Source

AMBARI-3577 - Move service related code in AmbariManagementController to ServiceResourceProvider

tbeerbower 12 years ago
parent
commit
1aad6407be

+ 71 - 79
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java

@@ -19,8 +19,17 @@
 package org.apache.ambari.server.controller;
 
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ParentObjectNotFoundException;
-
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.State;
+
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -40,16 +49,6 @@ public interface AmbariManagementController {
    */
   public void createCluster(ClusterRequest request) throws AmbariException;
 
-  /**
-   * Create the service defined by the attributes in the given request object.
-   *
-   * @param requests  the request object which defines the service to be created
-   *
-   * @throws AmbariException thrown if the service cannot be created
-   */
-  public void createServices(Set<ServiceRequest> requests)
-      throws AmbariException, ParentObjectNotFoundException;
-
   /**
    * Create the component defined by the attributes in the given request object.
    *
@@ -114,19 +113,6 @@ public interface AmbariManagementController {
   public Set<ClusterResponse> getClusters(Set<ClusterRequest> requests)
       throws AmbariException;
 
-  /**
-   * Get the services identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the services
-   * to be returned
-   *
-   * @return a set of service responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
-      throws AmbariException;
-
   /**
    * Get the components identified by the given request objects.
    *
@@ -212,17 +198,6 @@ public interface AmbariManagementController {
   public Set<UserResponse> getUsers(Set<UserRequest> requests)
       throws AmbariException;
   
-  /**
-   * Gets the host component config mappings
-   * 
-   * @param request the host component request
-   * 
-   * @return the configuration mappings
-   * 
-   * @throws AmbariException
-   */
-  public Map<String, String> getHostComponentDesiredConfigMapping(
-      ServiceComponentHostRequest request) throws AmbariException;
 
   // ----- Update -----------------------------------------------------------
 
@@ -243,25 +218,6 @@ public interface AmbariManagementController {
                                               Map<String, String> requestProperties)
       throws AmbariException;
 
-  /**
-   * Update the service identified by the given request object with the
-   * values carried by the given request object.
-   *
-   *
-   *
-   * @param requests    the request object which defines which service to
-   *                   update and the values to set
-   *
-   * @param requestProperties
-   * @param reconfigureClients
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public RequestStatusResponse updateServices(Set<ServiceRequest> requests,
-      Map<String, String> requestProperties, boolean runSmokeTest,
-      boolean reconfigureClients) throws AmbariException;
-
   /**
    * Update the component identified by the given request object with the
    * values carried by the given request object.
@@ -271,8 +227,9 @@ public interface AmbariManagementController {
    * @param requests    the request object which defines which component to
    *                   update and the values to set
    *
-   * @param requestProperties
-   * @param runSmokeTest
+   * @param requestProperties  the request properties
+   * @param runSmokeTest       indicates whether or not to run a smoke test
+   *
    * @return a track action response
    *
    * @throws AmbariException thrown if the resource cannot be updated
@@ -285,8 +242,8 @@ public interface AmbariManagementController {
    * Update the host identified by the given request object with the
    * values carried by the given request object.
    *
-   * @param requests    the request object which defines which host to
-   *                   update and the values to set
+   * @param requests  the request object which defines which host to
+   *                  update and the values to set
    *
    * @throws AmbariException thrown if the resource cannot be updated
    */
@@ -299,11 +256,11 @@ public interface AmbariManagementController {
    *
    *
    *
-   * @param requests    the request object which defines which host component to
-   *                   update and the values to set
+   * @param requests           the request object which defines which host component to
+   *                           update and the values to set
+   * @param requestProperties  the request properties
+   * @param runSmokeTest       indicates whether or not to run a smoke test
    *
-   * @param requestProperties
-   * @param runSmokeTest
    * @return a track action response
    *
    * @throws AmbariException thrown if the resource cannot be updated
@@ -332,18 +289,6 @@ public interface AmbariManagementController {
    */
   public void deleteCluster(ClusterRequest request) throws AmbariException;
 
-  /**
-   * Delete the service identified by the given request object.
-   *
-   * @param requests  the request object which identifies which service to delete
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public RequestStatusResponse deleteServices(Set<ServiceRequest> requests)
-      throws AmbariException;
-
   /**
    * Delete the component identified by the given request object.
    *
@@ -361,8 +306,6 @@ public interface AmbariManagementController {
    *
    * @param requests  the request object which identifies which host to delete
    *
-   * @return a track action response
-   *
    * @throws AmbariException thrown if the resource cannot be deleted
    */
   public void deleteHosts(Set<HostRequest> requests)
@@ -412,7 +355,6 @@ public interface AmbariManagementController {
   public Set<ActionResponse> getActions(Set<ActionRequest> request)
       throws AmbariException;
 
-
   /**
    * Get supported stacks.
    * 
@@ -544,5 +486,55 @@ public interface AmbariManagementController {
    * @throws  AmbariException if the resources cannot be read
    */
   public Set<RootServiceHostComponentResponse> getRootServiceHostComponents(Set<RootServiceHostComponentRequest> requests) throws AmbariException;
-}
+
+
+  // ----- Common utility methods --------------------------------------------
+
+  /**
+   * Get the clusters for this management controller.
+   *
+   * @return the clusters
+   */
+  public Clusters getClusters();
+
+  /**
+   * Get the meta info for this management controller.
+   *
+   * @return the meta info
+   */
+  public AmbariMetaInfo getAmbariMetaInfo();
+
+  /**
+   * Get the service factory for this management controller.
+   *
+   * @return the service factory.
+   */
+  public ServiceFactory getServiceFactory();
+
+  /**
+   * Create the stages required to persist an action and return a result containing the
+   * associated request and resulting tasks.
+   *
+   * @param cluster             the cluster
+   * @param requestProperties   the request properties
+   * @param requestParameters   the request parameters; may be null
+   * @param changedServices     the services being changed; may be null
+   * @param changedComponents   the components being changed
+   * @param changedHosts        the hosts being changed
+   * @param ignoredHosts        the hosts to be ignored
+   * @param runSmokeTest        indicates whether or not the smoke tests should be run
+   * @param reconfigureClients  indicates whether or not the clients should be reconfigured
+   *
+   * @return the request response
+   *
+   * @throws AmbariException thrown if the stages cannot be created
+   */
+  public RequestStatusResponse createStages(Cluster cluster, Map<String, String> requestProperties,
+                                            Map<String, String> requestParameters,
+                                            Map<State, List<Service>> changedServices,
+                                            Map<State, List<ServiceComponent>> changedComponents,
+                                            Map<String, Map<State, List<ServiceComponentHost>>> changedHosts,
+                                            Collection<ServiceComponentHost> ignoredHosts,
+                                            boolean runSmokeTest, boolean reconfigureClients) throws AmbariException;
+  }
   

File diff suppressed because it is too large
+ 166 - 956
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java


+ 536 - 10
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java

@@ -18,16 +18,36 @@
 package org.apache.ambari.server.controller.internal;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ClusterNotFoundException;
+import org.apache.ambari.server.DuplicateResourceException;
+import org.apache.ambari.server.ObjectNotFoundException;
+import org.apache.ambari.server.ParentObjectNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ServiceRequest;
 import org.apache.ambari.server.controller.ServiceResponse;
 import org.apache.ambari.server.controller.spi.*;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.commons.lang.StringUtils;
+
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -88,7 +108,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     createResources(new Command<Void>() {
       @Override
       public Void invoke() throws AmbariException {
-        getManagementController().createServices(requests);
+        createServices(requests);
         return null;
       }
     });
@@ -110,7 +130,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     Set<ServiceResponse> responses = getResources(new Command<Set<ServiceResponse>>() {
       @Override
       public Set<ServiceResponse> invoke() throws AmbariException {
-        return getManagementController().getServices(requests);
+        return getServices(requests);
       }
     });
 
@@ -146,16 +166,16 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
       }
 
       final boolean runSmokeTest = "true".equals(getQueryParameterValue(
-        QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate)) ? true : false;
+          QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate));
 
-      final boolean reconfigureClients = "false".equals(getQueryParameterValue(
-        QUERY_PARAMETERS_RECONFIGURE_CLIENT, predicate)) ? false : true;
+      final boolean reconfigureClients = !"false".equals(getQueryParameterValue(
+          QUERY_PARAMETERS_RECONFIGURE_CLIENT, predicate));
 
       response = modifyResources(new Command<RequestStatusResponse>() {
         @Override
         public RequestStatusResponse invoke() throws AmbariException {
-          return getManagementController().updateServices(requests,
-            request.getRequestInfoProperties(), runSmokeTest, reconfigureClients);
+          return updateServices(requests,
+              request.getRequestInfoProperties(), runSmokeTest, reconfigureClients);
         }
       });
     }
@@ -175,7 +195,7 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
       @Override
       public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().deleteServices(requests);
+        return deleteServices(requests);
       }
     });
 
@@ -204,13 +224,15 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
   }
 
 
-// ----- utility methods -------------------------------------------------
+  // ----- AbstractResourceProvider ----------------------------------------
 
   @Override
   protected Set<String> getPKPropertyIds() {
     return pkPropertyIds;
   }
 
+
+  // ----- utility methods -------------------------------------------------
   /**
    * Get a service request object from a map of property values.
    *
@@ -233,4 +255,508 @@ class ServiceResourceProvider extends AbstractControllerResourceProvider {
     }
     return svcRequest;
   }
+
+  // Create services from the given set of requests.
+  protected synchronized void createServices(Set<ServiceRequest> requests)
+      throws AmbariException {
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty requests set");
+      return;
+    }
+
+    Clusters       clusters       = getManagementController().getClusters();
+    AmbariMetaInfo ambariMetaInfo = getManagementController().getAmbariMetaInfo();
+
+    // do all validation checks
+    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
+    Set<String> duplicates = new HashSet<String>();
+    for (ServiceRequest request : requests) {
+      if (request.getClusterName() == null
+          || request.getClusterName().isEmpty()
+          || request.getServiceName() == null
+          || request.getServiceName().isEmpty()) {
+        throw new IllegalArgumentException("Cluster name and service name"
+            + " should be provided when creating a service");
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Received a createService request"
+            + ", clusterName=" + request.getClusterName()
+            + ", serviceName=" + request.getServiceName()
+            + ", request=" + request);
+      }
+
+      if (!serviceNames.containsKey(request.getClusterName())) {
+        serviceNames.put(request.getClusterName(), new HashSet<String>());
+      }
+      if (serviceNames.get(request.getClusterName())
+          .contains(request.getServiceName())) {
+        // throw error later for dup
+        duplicates.add(request.getServiceName());
+        continue;
+      }
+      serviceNames.get(request.getClusterName()).add(request.getServiceName());
+
+      if (request.getDesiredState() != null
+          && !request.getDesiredState().isEmpty()) {
+        State state = State.valueOf(request.getDesiredState());
+        if (!state.isValidDesiredState()
+            || state != State.INIT) {
+          throw new IllegalArgumentException("Invalid desired state"
+              + " only INIT state allowed during creation"
+              + ", providedDesiredState=" + request.getDesiredState());
+        }
+      }
+
+      Cluster cluster;
+      try {
+        cluster = clusters.getCluster(request.getClusterName());
+      } catch (ClusterNotFoundException e) {
+        throw new ParentObjectNotFoundException("Attempted to add a service to a cluster which doesn't exist", e);
+      }
+      try {
+        Service s = cluster.getService(request.getServiceName());
+        if (s != null) {
+          // throw error later for dup
+          duplicates.add(request.getServiceName());
+          continue;
+        }
+      } catch (ServiceNotFoundException e) {
+        // Expected
+      }
+
+      StackId stackId = cluster.getDesiredStackVersion();
+      if (!ambariMetaInfo.isValidService(stackId.getStackName(),
+          stackId.getStackVersion(), request.getServiceName())) {
+        throw new IllegalArgumentException("Unsupported or invalid service"
+            + " in stack"
+            + ", clusterName=" + request.getClusterName()
+            + ", serviceName=" + request.getServiceName()
+            + ", stackInfo=" + stackId.getStackId());
+      }
+    }
+
+    // ensure only a single cluster update
+    if (serviceNames.size() != 1) {
+      throw new IllegalArgumentException("Invalid arguments, updates allowed"
+          + "on only one cluster at a time");
+    }
+
+    // Validate dups
+    if (!duplicates.isEmpty()) {
+      StringBuilder svcNames = new StringBuilder();
+      boolean first = true;
+      for (String svcName : duplicates) {
+        if (!first) {
+          svcNames.append(",");
+        }
+        first = false;
+        svcNames.append(svcName);
+      }
+      String clusterName = requests.iterator().next().getClusterName();
+      String msg;
+      if (duplicates.size() == 1) {
+        msg = "Attempted to create a service which already exists: "
+            + ", clusterName=" + clusterName  + " serviceName=" + svcNames.toString();
+      } else {
+        msg = "Attempted to create services which already exist: "
+            + ", clusterName=" + clusterName  + " serviceNames=" + svcNames.toString();
+      }
+      throw new DuplicateResourceException(msg);
+    }
+
+    ServiceFactory serviceFactory = getManagementController().getServiceFactory();
+
+    // now to the real work
+    for (ServiceRequest request : requests) {
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+
+      // FIXME initialize configs based off service.configVersions
+      Map<String, Config> configs = new HashMap<String, Config>();
+
+      State state = State.INIT;
+
+      // Already checked that service does not exist
+      Service s = serviceFactory.createNew(cluster, request.getServiceName());
+
+      s.setDesiredState(state);
+      s.updateDesiredConfigs(configs);
+      s.setDesiredStackVersion(cluster.getDesiredStackVersion());
+      cluster.addService(s);
+      s.persist();
+    }
+  }
+
+  // Get services from the given set of requests.
+  protected Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
+      throws AmbariException {
+    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
+    for (ServiceRequest request : requests) {
+      try {
+        response.addAll(getServices(request));
+      } catch (ServiceNotFoundException e) {
+        if (requests.size() == 1) {
+          // only throw exception if 1 request.
+          // there will be > 1 request in case of OR predicate
+          throw e;
+        }
+      }
+    }
+    return response;
+  }
+
+  // Get services from the given request.
+  private synchronized Set<ServiceResponse> getServices(ServiceRequest request)
+      throws AmbariException {
+    if (request.getClusterName() == null
+        || request.getClusterName().isEmpty()) {
+      throw new AmbariException("Invalid arguments, cluster name"
+          + " cannot be null");
+    }
+    Clusters clusters    = getManagementController().getClusters();
+    String   clusterName = request.getClusterName();
+
+    final Cluster cluster;
+    try {
+      cluster = clusters.getCluster(clusterName);
+    } catch (ObjectNotFoundException e) {
+      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
+    }
+
+    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
+    if (request.getServiceName() != null) {
+      Service s = cluster.getService(request.getServiceName());
+      response.add(s.convertToResponse());
+      return response;
+    }
+
+    // TODO support search on predicates?
+
+    boolean checkDesiredState = false;
+    State desiredStateToCheck = null;
+    if (request.getDesiredState() != null
+        && !request.getDesiredState().isEmpty()) {
+      desiredStateToCheck = State.valueOf(request.getDesiredState());
+      if (!desiredStateToCheck.isValidDesiredState()) {
+        throw new IllegalArgumentException("Invalid arguments, invalid desired"
+            + " state, desiredState=" + desiredStateToCheck);
+      }
+      checkDesiredState = true;
+    }
+
+    for (Service s : cluster.getServices().values()) {
+      if (checkDesiredState
+          && (desiredStateToCheck != s.getDesiredState())) {
+        // skip non matching state
+        continue;
+      }
+      response.add(s.convertToResponse());
+    }
+    return response;
+  }
+
+  // Update services based on the given requests.
+  protected synchronized RequestStatusResponse updateServices(
+      Set<ServiceRequest> requests, Map<String, String> requestProperties,
+      boolean runSmokeTest, boolean reconfigureClients) throws AmbariException {
+
+    AmbariManagementController controller = getManagementController();
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty requests set");
+      return null;
+    }
+
+    Map<State, List<Service>> changedServices
+        = new HashMap<State, List<Service>>();
+    Map<State, List<ServiceComponent>> changedComps =
+        new HashMap<State, List<ServiceComponent>>();
+    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts =
+        new HashMap<String, Map<State, List<ServiceComponentHost>>>();
+    Collection<ServiceComponentHost> ignoredScHosts =
+        new ArrayList<ServiceComponentHost>();
+
+    Set<String> clusterNames = new HashSet<String>();
+    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
+    Set<State> seenNewStates = new HashSet<State>();
+
+    Clusters       clusters       = controller.getClusters();
+    AmbariMetaInfo ambariMetaInfo = controller.getAmbariMetaInfo();
+
+    for (ServiceRequest request : requests) {
+      if (request.getClusterName() == null
+          || request.getClusterName().isEmpty()
+          || request.getServiceName() == null
+          || request.getServiceName().isEmpty()) {
+        throw new IllegalArgumentException("Invalid arguments, cluster name"
+            + " and service name should be provided to update services");
+      }
+
+      LOG.info("Received a updateService request"
+          + ", clusterName=" + request.getClusterName()
+          + ", serviceName=" + request.getServiceName()
+          + ", request=" + request.toString());
+
+      clusterNames.add(request.getClusterName());
+
+      if (clusterNames.size() > 1) {
+        throw new IllegalArgumentException("Updates to multiple clusters is not"
+            + " supported");
+      }
+
+      if (!serviceNames.containsKey(request.getClusterName())) {
+        serviceNames.put(request.getClusterName(), new HashSet<String>());
+      }
+      if (serviceNames.get(request.getClusterName())
+          .contains(request.getServiceName())) {
+        // TODO throw single exception
+        throw new IllegalArgumentException("Invalid request contains duplicate"
+            + " service names");
+      }
+      serviceNames.get(request.getClusterName()).add(request.getServiceName());
+
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+      Service s = cluster.getService(request.getServiceName());
+      State oldState = s.getDesiredState();
+      State newState = null;
+      if (request.getDesiredState() != null) {
+        newState = State.valueOf(request.getDesiredState());
+        if (!newState.isValidDesiredState()) {
+          throw new IllegalArgumentException("Invalid arguments, invalid"
+              + " desired state, desiredState=" + newState);
+        }
+      }
+
+      if (request.getConfigVersions() != null) {
+        State.checkUpdateConfiguration(s, newState);
+
+        for (Map.Entry<String,String> entry :
+            request.getConfigVersions().entrySet()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Attaching config to service"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", configType=" + entry.getKey()
+                + ", configTag=" + entry.getValue());
+          }
+          Config config = cluster.getConfig(
+              entry.getKey(), entry.getValue());
+          if (null == config) {
+            // throw error for invalid config
+            throw new AmbariException("Trying to update service with"
+                + " invalid configs"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + s.getName()
+                + ", invalidConfigType=" + entry.getKey()
+                + ", invalidConfigTag=" + entry.getValue());
+          }
+        }
+      }
+
+
+      if (newState == null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Nothing to do for new updateService request"
+              + ", clusterName=" + request.getClusterName()
+              + ", serviceName=" + request.getServiceName()
+              + ", newDesiredState=null");
+        }
+        continue;
+      }
+
+      seenNewStates.add(newState);
+
+      if (newState != oldState) {
+        if (!State.isValidDesiredStateTransition(oldState, newState)) {
+          throw new AmbariException("Invalid transition for"
+              + " service"
+              + ", clusterName=" + cluster.getClusterName()
+              + ", clusterId=" + cluster.getClusterId()
+              + ", serviceName=" + s.getName()
+              + ", currentDesiredState=" + oldState
+              + ", newDesiredState=" + newState);
+
+        }
+        if (!changedServices.containsKey(newState)) {
+          changedServices.put(newState, new ArrayList<Service>());
+        }
+        changedServices.get(newState).add(s);
+      }
+
+      // TODO should we check whether all servicecomponents and
+      // servicecomponenthosts are in the required desired state?
+
+      for (ServiceComponent sc : s.getServiceComponents().values()) {
+        State oldScState = sc.getDesiredState();
+        if (newState != oldScState) {
+          if (sc.isClientComponent() &&
+              !newState.isValidClientComponentState()) {
+            continue;
+          }
+          if (!State.isValidDesiredStateTransition(oldScState, newState)) {
+            throw new AmbariException("Invalid transition for"
+                + " servicecomponent"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + sc.getServiceName()
+                + ", componentName=" + sc.getName()
+                + ", currentDesiredState=" + oldScState
+                + ", newDesiredState=" + newState);
+          }
+          if (!changedComps.containsKey(newState)) {
+            changedComps.put(newState, new ArrayList<ServiceComponent>());
+          }
+          changedComps.get(newState).add(sc);
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Handling update to ServiceComponent"
+              + ", clusterName=" + request.getClusterName()
+              + ", serviceName=" + s.getName()
+              + ", componentName=" + sc.getName()
+              + ", currentDesiredState=" + oldScState
+              + ", newDesiredState=" + newState);
+        }
+        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()){
+          State oldSchState = sch.getState();
+          if (oldSchState == State.MAINTENANCE || oldSchState == State.UNKNOWN) {
+            //Ignore host components updates in this state
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Ignoring ServiceComponentHost"
+                  + ", clusterName=" + request.getClusterName()
+                  + ", serviceName=" + s.getName()
+                  + ", componentName=" + sc.getName()
+                  + ", hostname=" + sch.getHostName()
+                  + ", currentState=" + oldSchState
+                  + ", newDesiredState=" + newState);
+            }
+            continue;
+          }
+          if (newState == oldSchState) {
+            ignoredScHosts.add(sch);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Ignoring ServiceComponentHost"
+                  + ", clusterName=" + request.getClusterName()
+                  + ", serviceName=" + s.getName()
+                  + ", componentName=" + sc.getName()
+                  + ", hostname=" + sch.getHostName()
+                  + ", currentState=" + oldSchState
+                  + ", newDesiredState=" + newState);
+            }
+            continue;
+          }
+          if (sc.isClientComponent() &&
+              !newState.isValidClientComponentState()) {
+            continue;
+          }
+          /**
+           * This is hack for now wherein we don't fail if the
+           * sch is in INSTALL_FAILED
+           */
+          if (!State.isValidStateTransition(oldSchState, newState)) {
+            String error = "Invalid transition for"
+                + " servicecomponenthost"
+                + ", clusterName=" + cluster.getClusterName()
+                + ", clusterId=" + cluster.getClusterId()
+                + ", serviceName=" + sch.getServiceName()
+                + ", componentName=" + sch.getServiceComponentName()
+                + ", hostname=" + sch.getHostName()
+                + ", currentState=" + oldSchState
+                + ", newDesiredState=" + newState;
+            StackId sid = cluster.getDesiredStackVersion();
+
+            if ( ambariMetaInfo.getComponentCategory(
+                sid.getStackName(), sid.getStackVersion(), sc.getServiceName(),
+                sch.getServiceComponentName()).isMaster()) {
+              throw new AmbariException(error);
+            } else {
+              LOG.warn("Ignoring: " + error);
+              continue;
+            }
+          }
+          if (!changedScHosts.containsKey(sc.getName())) {
+            changedScHosts.put(sc.getName(),
+                new HashMap<State, List<ServiceComponentHost>>());
+          }
+          if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
+            changedScHosts.get(sc.getName()).put(newState,
+                new ArrayList<ServiceComponentHost>());
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Handling update to ServiceComponentHost"
+                + ", clusterName=" + request.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", componentName=" + sc.getName()
+                + ", hostname=" + sch.getHostName()
+                + ", currentState=" + oldSchState
+                + ", newDesiredState=" + newState);
+          }
+          changedScHosts.get(sc.getName()).get(newState).add(sch);
+        }
+      }
+    }
+
+    if (seenNewStates.size() > 1) {
+      // TODO should we handle this scenario
+      throw new IllegalArgumentException("Cannot handle different desired state"
+          + " changes for a set of services at the same time");
+    }
+
+    for (ServiceRequest request : requests) {
+      Cluster cluster = clusters.getCluster(request.getClusterName());
+      Service s = cluster.getService(request.getServiceName());
+      if (request.getConfigVersions() != null) {
+        Map<String, Config> updated = new HashMap<String, Config>();
+
+        for (Map.Entry<String,String> entry : request.getConfigVersions().entrySet()) {
+          Config config = cluster.getConfig(entry.getKey(), entry.getValue());
+          updated.put(config.getType(), config);
+        }
+
+        if (!updated.isEmpty()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Updating service configs, attaching configs"
+                + ", clusterName=" + request.getClusterName()
+                + ", serviceName=" + s.getName()
+                + ", configCount=" + updated.size());
+          }
+          s.updateDesiredConfigs(updated);
+          s.persist();
+        }
+
+        for (ServiceComponent sc : s.getServiceComponents().values()) {
+          sc.deleteDesiredConfigs(updated.keySet());
+          for (ServiceComponentHost sch :
+              sc.getServiceComponentHosts().values()) {
+            sch.deleteDesiredConfigs(updated.keySet());
+            sch.persist();
+          }
+          sc.persist();
+        }
+      }
+    }
+
+    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
+
+    return controller.createStages(cluster, requestProperties, null, changedServices, changedComps, changedScHosts,
+        ignoredScHosts, runSmokeTest, reconfigureClients);
+  }
+
+  // Delete services based on the given set of requests.
+  protected RequestStatusResponse deleteServices(Set<ServiceRequest> request)
+      throws AmbariException {
+
+    Clusters clusters    = getManagementController().getClusters();
+
+    for (ServiceRequest serviceRequest : request) {
+      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceName())) {
+        // FIXME throw correct error
+        throw new AmbariException("invalid arguments");
+      } else {
+        clusters.getCluster(serviceRequest.getClusterName()).deleteService(serviceRequest.getServiceName());
+      }
+    }
+    return null;
+  }
 }

+ 156 - 19
ambari-server/src/main/java/org/apache/ambari/server/state/State.java

@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.state;
 
+import org.apache.ambari.server.AmbariException;
+
 public enum State {
   /**
    * Initial/Clean state.
@@ -96,25 +98,6 @@ public enum State {
     }
   }
 
-  /**
-   * Indicates whether or not its a state indicating a task in progress.
-   *
-   * @return true if this is a state indicating progress.
-   */
-  public boolean isInProgressState() {
-    switch (State.values()[this.state]) {
-      case INSTALLING:
-      case STARTING:
-      case STOPPING:
-      case UNINSTALLING:
-      case WIPING_OUT:
-      case UPGRADING:
-        return true;
-      default:
-        return false;
-    }
-  }
-
   /**
    * Indicates whether or not it is a valid state for the client component.
    *
@@ -150,4 +133,158 @@ public enum State {
         return false;
     }
   }
+
+  /**
+   * Utility method to determine whether or not a valid transition can be made from the given states.
+   *
+   * @param startState    the starting state
+   * @param desiredState  the desired state
+   *
+   * @return true iff a valid transition can be made from the starting state to the desired state
+   */
+  public static boolean isValidStateTransition(State startState, State desiredState) {
+    switch(desiredState) {
+      case INSTALLED:
+        if (startState == State.INIT
+            || startState == State.UNINSTALLED
+            || startState == State.INSTALLED
+            || startState == State.INSTALLING
+            || startState == State.STARTED
+            || startState == State.INSTALL_FAILED
+            || startState == State.UPGRADING
+            || startState == State.STOPPING
+            || startState == State.UNKNOWN
+            || startState == State.MAINTENANCE) {
+          return true;
+        }
+        break;
+      case STARTED:
+        if (startState == State.INSTALLED
+            || startState == State.STARTING
+            || startState == State.STARTED) {
+          return true;
+        }
+        break;
+      case UNINSTALLED:
+        if (startState == State.INSTALLED
+            || startState == State.UNINSTALLED
+            || startState == State.UNINSTALLING) {
+          return true;
+        }
+        // break was missing: control fell through to the INIT case, wrongly
+        // accepting e.g. WIPING_OUT -> UNINSTALLED
+        break;
+      case INIT:
+        if (startState == State.UNINSTALLED
+            || startState == State.INIT
+            || startState == State.WIPING_OUT) {
+          return true;
+        }
+        // break was missing: control fell through to the MAINTENANCE case,
+        // wrongly accepting INSTALLED/UNKNOWN -> INIT
+        break;
+      case MAINTENANCE:
+        if (startState == State.INSTALLED
+            || startState == State.UNKNOWN) {
+          return true;
+        }
+        break;
+    }
+    // any other desired state, or an unlisted start state, is invalid
+    return false;
+  }
+
+  /**
+   * Utility method to determine whether or not the given desired state is valid for the given starting state.
+   *
+   * Note: unlike {@link #isValidStateTransition}, only INSTALLED and STARTED
+   * are ever acceptable desired states here; every other desired state
+   * yields false.
+   *
+   * @param startState    the starting state
+   * @param desiredState  the desired state
+   *
+   * @return true iff the given desired state is valid for the given starting state
+   */
+  public static boolean isValidDesiredStateTransition(State startState, State desiredState) {
+    switch(desiredState) {
+      case INSTALLED:
+        if (startState == State.INIT
+            || startState == State.UNINSTALLED
+            || startState == State.INSTALLED
+            || startState == State.STARTED
+            || startState == State.STOPPING) {
+          return true;
+        }
+        break;
+      case STARTED:
+        if (startState == State.INSTALLED
+            || startState == State.STARTED) {
+          return true;
+        }
+        break;
+    }
+    return false;
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * component host for the given states.
+   *
+   * A null desiredState is treated as "no state change requested" and the
+   * configuration update is allowed unconditionally.
+   *
+   * @param serviceComponentHost  the service component host
+   * @param currentState          the current state (used only to build the
+   *                              exception message; it does not affect the check)
+   * @param desiredState          the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(
+      ServiceComponentHost serviceComponentHost,
+      State currentState, State desiredState)
+      throws AmbariException {
+
+    if (desiredState != null) {
+      // config changes are only supported while transitioning to
+      // INIT, INSTALLED or STARTED
+      if (!(desiredState == State.INIT
+          || desiredState == State.INSTALLED
+          || desiredState == State.STARTED)) {
+        throw new AmbariException("Changing of configs not supported"
+            + " for this transition"
+            + ", clusterName=" + serviceComponentHost.getClusterName()
+            + ", serviceName=" + serviceComponentHost.getServiceName()
+            + ", componentName=" + serviceComponentHost.getServiceComponentName()
+            + ", hostname=" + serviceComponentHost.getHostName()
+            + ", currentState=" + currentState
+            + ", newDesiredState=" + desiredState);
+      }
+    }
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * component for the given state.
+   *
+   * Delegates to the per-host check for every host of the component; the
+   * first host that rejects the change aborts the whole update.
+   *
+   * @param serviceComponent  the service component
+   * @param desiredState      the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(
+      ServiceComponent serviceComponent,
+      State desiredState)
+      throws AmbariException {
+    for (ServiceComponentHost sch :
+        serviceComponent.getServiceComponentHosts().values()) {
+      checkUpdateConfiguration(sch,
+          sch.getState(), desiredState);
+    }
+  }
+
+  /**
+   * Determine whether or not it is safe to update the configuration of the given service
+   * for the given state.
+   *
+   * Delegates to the per-component check for every component of the service;
+   * the first component that rejects the change aborts the whole update.
+   *
+   * @param service       the service
+   * @param desiredState  the desired state
+   *
+   * @throws AmbariException if the changing of configuration is not supported
+   */
+  public static void checkUpdateConfiguration(Service service,
+                                              State desiredState)
+      throws AmbariException {
+    for (ServiceComponent component :
+        service.getServiceComponents().values()) {
+      checkUpdateConfiguration(component,
+          desiredState);
+    }
+  }
+
+
+
 }

+ 20 - 854
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java

@@ -18,45 +18,16 @@
 
 package org.apache.ambari.server.controller;
 
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.lang.reflect.Type;
-import java.text.MessageFormat;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.gson.Gson;
+import com.google.inject.Injector;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.ParentObjectNotFoundException;
-import org.apache.ambari.server.Role;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -64,20 +35,27 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.Predicate;
 import org.easymock.Capture;
 import org.junit.Test;
 
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * AmbariManagementControllerImpl unit tests
@@ -285,151 +263,6 @@ public class AmbariManagementControllerImplTest {
     verify(injector, clusters, cluster, cluster2, response, response2);
   }
 
-  @Test
-  public void testGetServices() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
-
-    expect(service.convertToResponse()).andReturn(response);
-    // replay mocks
-    replay(injector, clusters, cluster, service, response);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, service, response);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is propagated in case where there is a single request.
-   */
-  @Test
-  public void testGetServices___ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("custer1", "service1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getServices(setRequests);
-      fail("expected ServiceNotFoundException");
-    } catch (ServiceNotFoundException e) {
-      // expected
-    }
-
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service1 = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-    ServiceResponse response2 = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request2 = new ServiceRequest("cluster1", "service2", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request3 = new ServiceRequest("cluster1", "service3", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request4 = new ServiceRequest("cluster1", "service4", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
-    expect(cluster.getService("service1")).andReturn(service1);
-    expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
-    expect(cluster.getService("service3")).andThrow(new ServiceNotFoundException("cluster1", "service3"));
-    expect(cluster.getService("service4")).andReturn(service2);
-
-    expect(service1.convertToResponse()).andReturn(response);
-    expect(service2.convertToResponse()).andReturn(response2);
-    // replay mocks
-    replay(injector, clusters, cluster, service1, service2, response, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, service1, service2, response, response2);
-  }
-
   @Test
   public void testGetComponents() throws Exception {
     // member state mocks
@@ -1571,671 +1404,4 @@ public class AmbariManagementControllerImplTest {
     verify(injector, clusters, cluster, response1, response2, response3, stack, metaInfo, service1, service2,
         component1, component2, componentHost1, componentHost2, componentHost3);
   }
-
-  @Test
-  public void testMaintenanceAndDeleteStates() throws Exception {
-    Map<String,String> mapRequestProps = new HashMap<String, String>();
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-        
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/main/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "target/version");
-        properties.setProperty(Configuration.OS_VERSION_KEY,
-            "centos5");
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-      Gson gson = new Gson();
-  
-      clusters.addHost("host1");
-      clusters.addHost("host2");
-      clusters.addHost("host3");
-      Host host = clusters.getHost("host1");
-      host.setOsType("centos5");
-      host.persist();
-      host = clusters.getHost("host2");
-      host.setOsType("centos5");
-      host.persist();
-      host = clusters.getHost("host3");
-      host.setOsType("centos5");
-      host.persist();
-  
-      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
-      amc.createCluster(clusterRequest);
-  
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Type confType = new TypeToken<Map<String, String>>() {
-      }.getType();
-  
-      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
-          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-      configurationRequest = new ConfigurationRequest("c1", "global", "version1",
-          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-  
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS",
-          gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType)
-          , null));
-  
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
-      hostRequests.add(new HostRequest("host1", "c1", null));
-      hostRequests.add(new HostRequest("host2", "c1", null));
-      hostRequests.add(new HostRequest("host3", "c1", null));
-  
-      amc.createHosts(hostRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-  
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Cluster cluster = clusters.getCluster("c1");
-      Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-  
-      ServiceComponentHost componentHost = namenodes.get("host1");
-  
-      Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-      hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-      hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-        ServiceComponentHost cHost = entry.getValue();
-        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-      }
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.MAINTENANCE, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.INSTALLED, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      assertEquals(State.MAINTENANCE, componentHost.getState());
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED"));
-  
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-  
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(2, namenodes.size());
-  
-      componentHost = namenodes.get("host2");
-      componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-      componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
-  
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-  
-      RequestStatusResponse response = amc.updateServices(serviceRequests,
-        mapRequestProps, true, false);
-      for (ShortTaskStatus shortTaskStatus : response.getTasks()) {
-        assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole()));
-      }
-  
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-  
-      amc.deleteHostComponents(componentHostRequests);
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-  
-      // testing the behavior for runSmokeTest flag
-      // piggybacking on this test to avoid setting up the mock cluster
-      testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests);
-  
-      // should be able to add the host component back
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      amc.createHostComponents(componentHostRequests);
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(2, namenodes.size());
-      
-      
-      // make unknown
-      ServiceComponentHost sch = null;
-      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-        if (tmp.getServiceComponentName().equals("DATANODE")) {
-          tmp.setState(State.UNKNOWN);
-          sch = tmp;
-        }
-      }
-      assertNotNull(sch);
-  
-      // make maintenance
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE"));
-      amc.updateHostComponents(componentHostRequests, mapRequestProps, false);
-      assertEquals(State.MAINTENANCE, sch.getState ());
-      
-      // confirm delete
-      componentHostRequests.clear();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      amc.deleteHostComponents(componentHostRequests);
-      
-      sch = null;
-      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-        if (tmp.getServiceComponentName().equals("DATANODE")) {
-          sch = tmp;
-        }
-      }
-      assertNull(sch);
-    
-      /*
-      *Test remove service
-      */
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", null, null, null));
-      assertEquals(1, amc.getServices(serviceRequests).size());
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-      amc.deleteServices(serviceRequests);
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", null, null, null));     
-      assertEquals(0, amc.getServices(serviceRequests).size());
-      
-      /*
-      *Test add service again
-      */
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-      amc.createServices(serviceRequests);
-      assertEquals(1, amc.getServices(serviceRequests).size());
-      //Create new configs
-      configurationRequest = new ConfigurationRequest("c1", "core-site", "version2",
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2",
-          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);
-      configurationRequest = new ConfigurationRequest("c1", "global", "version2",
-          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-      );
-      amc.createConfiguration(configurationRequest);    
-      //Add configs to service
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS",
-          gson.<Map<String, String>>fromJson("{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType)
-          , null));
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-      //Crate service components
-      serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-      amc.createComponents(serviceComponentRequests);
-      
-      //Create ServiceComponentHosts
-      componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-      amc.createHostComponents(componentHostRequests);    
-  
-      
-      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes.size());
-      Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-      assertEquals(3, datanodes.size());
-      Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-      assertEquals(1, namenodes2.size());
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }    
-  }
-
-  private void testRunSmokeTestFlag(Map<String, String> mapRequestProps,
-                                    AmbariManagementController amc,
-                                    Set<ServiceRequest> serviceRequests)
-      throws AmbariException {
-    RequestStatusResponse response;//Starting HDFS service. No run_smoke_test flag is set, smoke
-
-    //Stopping HDFS service
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps, false,
-      false);
-
-    //Starting HDFS service. No run_smoke_test flag is set, smoke
-    // test(HDFS_SERVICE_CHECK) won't run
-    boolean runSmokeTest = false;
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps,
-      runSmokeTest, false);
-
-    List<ShortTaskStatus> taskStatuses = response.getTasks();
-    boolean smokeTestRequired = false;
-    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
-      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
-         smokeTestRequired= true;
-      }
-    }
-    assertFalse(smokeTestRequired);
-
-    //Stopping HDFS service
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps, false,
-      false);
-
-    //Starting HDFS service again.
-    //run_smoke_test flag is set, smoke test will be run
-    runSmokeTest = true;
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-    response = amc.updateServices(serviceRequests, mapRequestProps,
-      runSmokeTest, false);
-
-    taskStatuses = response.getTasks();
-    smokeTestRequired = false;
-    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
-      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
-        smokeTestRequired= true;
-      }
-    }
-    assertTrue(smokeTestRequired);
-  }
-
-
-  @Test
-  public void testScheduleSmokeTest() throws Exception {
-
-    final String HOST1 = "host1";
-    final String OS_TYPE = "centos5";
-    final String STACK_ID = "HDP-2.0.1";
-    final String CLUSTER_NAME = "c1";
-    final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK";
-    final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK";
-    final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK";
-
-    Map<String,String> mapRequestProps = Collections.<String,String>emptyMap();
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/test/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "../version");
-        properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE);
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-  
-      clusters.addHost(HOST1);
-      Host host = clusters.getHost(HOST1);
-      host.setOsType(OS_TYPE);
-      host.persist();
-  
-      ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
-      amc.createCluster(clusterRequest);
-  
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
-      hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null));
-  
-      amc.createHosts(hostRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-  
-      //Install services
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.INSTALLED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.INSTALLED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.INSTALLED.name()));
-  
-      amc.updateServices(serviceRequests, mapRequestProps, true, false);
-  
-      Cluster cluster = clusters.getCluster(CLUSTER_NAME);
-  
-      for (String serviceName : cluster.getServices().keySet() ) {
-  
-        for(String componentName: cluster.getService(serviceName).getServiceComponents().keySet()) {
-  
-          Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName).getServiceComponent(componentName).getServiceComponentHosts();
-  
-          for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) {
-            ServiceComponentHost cHost = entry.getValue();
-            cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), STACK_ID));
-            cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-          }
-        }
-      }
-  
-      //Start services
-      serviceRequests.clear();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.STARTED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.STARTED.name()));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.STARTED.name()));
-  
-      RequestStatusResponse response = amc.updateServices(serviceRequests,
-        mapRequestProps, true, false);
-  
-      Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(HDFS_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for HDFS
-      assertEquals(1, hdfsSmokeTasks.size());
-  
-      Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for MAPREDUCE2
-      assertEquals(1, mapreduce2SmokeTasks.size());
-  
-      Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(YARN_SERVICE_CHECK_ROLE));
-      //Ensure that smoke test task was created for YARN
-      assertEquals(1, yarnSmokeTasks.size());
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }
-  }
-
-  private class RolePredicate implements Predicate {
-
-    private String role;
-
-    public RolePredicate(String role) {
-      this.role = role;
-    }
-
-    @Override
-    public boolean evaluate(Object obj) {
-      ShortTaskStatus task = (ShortTaskStatus)obj;
-      return task.getRole().equals(role);
-    }
-  }
-
-  
-  @Test
-  public void testDeleteClusterCreateHost() throws Exception {
-    
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/test/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "../version");
-        properties.setProperty(Configuration.OS_VERSION_KEY, "centos6");
-        
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-    
-    
-    String STACK_ID = "HDP-2.0.1";
-    String CLUSTER_NAME = "c1";
-    String HOST1 = "h1";
-    String HOST2 = "h2";
-    
-    try {
-      Clusters clusters = injector.getInstance(Clusters.class);
-      
-      clusters.addHost(HOST1);
-      Host host = clusters.getHost(HOST1);
-      host.setOsType("centos6");
-      host.persist();      
-      
-      clusters.addHost(HOST2);
-      host = clusters.getHost(HOST2);
-      host.setOsType("centos6");
-      host.persist();      
-
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-    
-      ClusterRequest cr = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
-      amc.createCluster(cr);
-      
-      ConfigurationRequest configRequest = new ConfigurationRequest(CLUSTER_NAME, "global", "version1",
-          new HashMap<String, String>() {{ put("a", "b"); }});
-      cr.setDesiredConfig(configRequest);
-      amc.updateClusters(Collections.singleton(cr), new HashMap<String, String>());
-      
-      // add some hosts
-      Set<HostRequest> hrs = new HashSet<HostRequest>();
-      hrs.add(new HostRequest(HOST1, CLUSTER_NAME, null));
-      amc.createHosts(hrs);
-      
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
-      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
-  
-      amc.createServices(serviceRequests);
-  
-      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
-      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", null, null));
-  
-      amc.createComponents(serviceComponentRequests);
-  
-      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
-      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HDFS_CLIENT", HOST1, null, null));
-  
-      amc.createHostComponents(componentHostRequests);
-      
-      ActionRequest ar = new ActionRequest(CLUSTER_NAME, "HDFS", Role.HDFS_SERVICE_CHECK.name(), new HashMap<String, String>());
-      amc.createActions(Collections.singleton(ar), null);
-  
-      // change mind, delete the cluster
-      amc.deleteCluster(cr);
-      
-      assertNotNull(clusters.getHost(HOST1));
-      assertNotNull(clusters.getHost(HOST2));
-      
-      HostDAO dao = injector.getInstance(HostDAO.class);
-      
-      assertNotNull(dao.findByName(HOST1));
-      assertNotNull(dao.findByName(HOST2));
-      
-    } finally {
-      injector.getInstance(PersistService.class).stop();
-    }     
-    
-  }
-
-  @Test
-  public void testApplyConfigurationWithTheSameTag() {
-    Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        Properties properties = new Properties();
-        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
-        properties.setProperty(Configuration.METADETA_DIR_PATH,
-            "src/main/resources/stacks");
-        properties.setProperty(Configuration.SERVER_VERSION_FILE,
-            "target/version");
-        properties.setProperty(Configuration.OS_VERSION_KEY,
-            "centos6");
-        try {
-          install(new ControllerModule(properties));
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-      }
-    });
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    String tag = "version1";
-    String type = "core-site";
-    AmbariException exception = null;
-    try {
-      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-      Clusters clusters = injector.getInstance(Clusters.class);
-      Gson gson = new Gson();
-
-      clusters.addHost("host1");
-      clusters.addHost("host2");
-      clusters.addHost("host3");
-      Host host = clusters.getHost("host1");
-      host.setOsType("centos6");
-      host.persist();
-      host = clusters.getHost("host2");
-      host.setOsType("centos6");
-      host.persist();
-      host = clusters.getHost("host3");
-      host.setOsType("centos6");
-      host.persist();
-
-      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
-      amc.createCluster(clusterRequest);
-
-      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-
-      amc.createServices(serviceRequests);
-
-      Type confType = new TypeToken<Map<String, String>>() {
-      }.getType();
-
-      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", type, tag,
-          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType));
-      amc.createConfiguration(configurationRequest);
-
-      amc.createConfiguration(configurationRequest);
-    } catch (AmbariException e) {
-      exception = e;
-    }
-
-    assertNotNull(exception);
-    String exceptionMessage = MessageFormat.format("Configuration with tag ''{0}'' exists for ''{1}''",
-        tag, type);
-    assertEquals(exceptionMessage, exception.getMessage());
-  }
 }

+ 926 - 110
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -18,27 +18,13 @@
 
 package org.apache.ambari.server.controller;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import javax.persistence.EntityManager;
-
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
@@ -47,6 +33,7 @@ import org.apache.ambari.server.ObjectNotFoundException;
 import org.apache.ambari.server.ParentObjectNotFoundException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.actionmanager.ActionDBAccessor;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
@@ -56,9 +43,11 @@ import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.RoleDAO;
 import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
 import org.apache.ambari.server.orm.entities.RoleEntity;
@@ -90,17 +79,44 @@ import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartedEve
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStoppedEvent;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.CollectionUtils;
+import org.easymock.Capture;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import javax.persistence.EntityManager;
+import java.io.File;
+import java.lang.reflect.Type;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class AmbariManagementControllerTest {
 
@@ -129,8 +145,6 @@ public class AmbariManagementControllerTest {
   
   private static final String REQUEST_CONTEXT_PROPERTY = "context";
 
-  private static final int CONFIG_MAP_CNT = 21;
-
   private AmbariManagementController controller;
   private Clusters clusters;
   private ActionDBAccessor actionDB;
@@ -189,7 +203,8 @@ public class AmbariManagementControllerTest {
         dStateStr);
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r1);
-    controller.createServices(requests);
+
+    ServiceResourceProviderTest.createServices(controller, requests);
   }
 
   private void createServiceComponent(String clusterName,
@@ -231,7 +246,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
-    RequestStatusResponse resp = controller.updateServices(requests,
+    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller, requests,
       mapRequestProps, runSmokeTests, reconfigureClients);
 
     Assert.assertEquals(State.INSTALLED,
@@ -289,7 +304,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
-    RequestStatusResponse resp = controller.updateServices(requests,
+    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller, requests,
         mapRequestProps, runSmokeTests, reconfigureClients);
 
     Assert.assertEquals(State.STARTED,
@@ -320,7 +335,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
-    RequestStatusResponse resp = controller.updateServices(requests,
+    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller, requests,
         mapRequestProps, runSmokeTests, reconfigureClients);
 
     Assert.assertEquals(State.INSTALLED,
@@ -340,6 +355,10 @@ public class AmbariManagementControllerTest {
     }
   }
 
+  private boolean checkExceptionType(Throwable e, Class<? extends Exception> exceptionClass) {
+    return e != null && (exceptionClass.isAssignableFrom(e.getClass()) || checkExceptionType(e.getCause(), exceptionClass));
+  }
+
   @Test
   public void testCreateClusterSimple() throws AmbariException {
     String clusterName = "foo1";
@@ -450,7 +469,7 @@ public class AmbariManagementControllerTest {
         null, null);
 
     Set<ServiceResponse> r =
-        controller.getServices(Collections.singleton(req));
+        ServiceResourceProviderTest.getServices(controller, Collections.singleton(req));
     Assert.assertEquals(1, r.size());
     ServiceResponse resp = r.iterator().next();
     Assert.assertEquals(serviceName, resp.getServiceName());
@@ -474,7 +493,7 @@ public class AmbariManagementControllerTest {
       set1.clear();
       ServiceRequest rInvalid = new ServiceRequest(null, null, null, null);
       set1.add(rInvalid);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -484,7 +503,7 @@ public class AmbariManagementControllerTest {
       set1.clear();
       ServiceRequest rInvalid = new ServiceRequest("foo", null, null, null);
       set1.add(rInvalid);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -494,10 +513,11 @@ public class AmbariManagementControllerTest {
       set1.clear();
       ServiceRequest rInvalid = new ServiceRequest("foo", "bar", null, null);
       set1.add(rInvalid);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid cluster");
-    } catch (ParentObjectNotFoundException e) {
+    } catch (AmbariException e) {
       // Expected
+      Assert.assertTrue(checkExceptionType(e, ParentObjectNotFoundException.class));
     }
 
     clusters.addCluster("foo");
@@ -511,7 +531,7 @@ public class AmbariManagementControllerTest {
       ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null, null);
       set1.add(valid1);
       set1.add(valid2);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -521,7 +541,7 @@ public class AmbariManagementControllerTest {
       set1.clear();
       ServiceRequest valid1 = new ServiceRequest("foo", "bar", null, null);
       set1.add(valid1);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid service");
     } catch (Exception e) {
       // Expected
@@ -534,7 +554,7 @@ public class AmbariManagementControllerTest {
       ServiceRequest valid2 = new ServiceRequest("bar", "HDFS", null, null);
       set1.add(valid1);
       set1.add(valid2);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for multiple clusters");
     } catch (Exception e) {
       // Expected
@@ -546,7 +566,7 @@ public class AmbariManagementControllerTest {
     set1.clear();
     ServiceRequest valid = new ServiceRequest("foo", "HDFS", null, null);
     set1.add(valid);
-    controller.createServices(set1);
+    ServiceResourceProviderTest.createServices(controller, set1);
 
     try {
       set1.clear();
@@ -554,7 +574,7 @@ public class AmbariManagementControllerTest {
       ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null, null);
       set1.add(valid1);
       set1.add(valid2);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for existing service");
     } catch (Exception e) {
       // Expected
@@ -594,7 +614,7 @@ public class AmbariManagementControllerTest {
     createService(clusterName, serviceName2, State.INIT);
 
     ServiceRequest r = new ServiceRequest(clusterName, null, null, null);
-    Set<ServiceResponse> response = controller.getServices(Collections.singleton(r));
+    Set<ServiceResponse> response = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(2, response.size());
 
     for (ServiceResponse svc : response) {
@@ -615,17 +635,18 @@ public class AmbariManagementControllerTest {
     ServiceRequest valid2 = new ServiceRequest("foo", "MAPREDUCE", null, null);
     set1.add(valid1);
     set1.add(valid2);
-    controller.createServices(set1);
+    ServiceResourceProviderTest.createServices(controller, set1);
 
     try {
       valid1 = new ServiceRequest("foo", "PIG", null, null);
       valid2 = new ServiceRequest("foo", "MAPREDUCE", null, null);
       set1.add(valid1);
       set1.add(valid2);
-      controller.createServices(set1);
+      ServiceResourceProviderTest.createServices(controller, set1);
       fail("Expected failure for invalid services");
-    } catch (DuplicateResourceException e) {
+    } catch (AmbariException e) {
       // Expected
+      Assert.assertTrue(checkExceptionType(e, DuplicateResourceException.class));
     }
 
     Assert.assertNotNull(clusters.getCluster("foo"));
@@ -1479,7 +1500,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
 
     RequestStatusResponse trackAction =
-        controller.updateServices(requests, mapRequestProps, true, false);
+        ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
         clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -1601,7 +1622,7 @@ public class AmbariManagementControllerTest {
         State.STARTED.toString());
     requests.clear();
     requests.add(r);
-    trackAction = controller.updateServices(requests, mapRequestProps, true,
+    trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true,
       false);
 
     Assert.assertEquals(State.STARTED,
@@ -1666,7 +1687,7 @@ public class AmbariManagementControllerTest {
         State.INSTALLED.toString());
     requests.clear();
     requests.add(r);
-    trackAction = controller.updateServices(requests, mapRequestProps, true,
+    trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true,
       false);
 
     Assert.assertEquals(State.INSTALLED,
@@ -1762,7 +1783,7 @@ public class AmbariManagementControllerTest {
     s1.persist();
 
     ServiceRequest r = new ServiceRequest("c1", null, null, null);
-    Set<ServiceResponse> resp = controller.getServices(Collections.singleton(r));
+    Set<ServiceResponse> resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
 
     ServiceResponse resp1 = resp.iterator().next();
 
@@ -1812,35 +1833,35 @@ public class AmbariManagementControllerTest {
     Set<ServiceResponse> resp;
 
     try {
-      controller.getServices(Collections.singleton(r));
+      ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
       fail("Expected failure for invalid request");
     } catch (Exception e) {
       // Expected
     }
 
     r = new ServiceRequest(c1.getClusterName(), null, null, null);
-    resp = controller.getServices(Collections.singleton(r));
+    resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(3, resp.size());
 
     r = new ServiceRequest(c1.getClusterName(), s2.getName(), null, null);
-    resp = controller.getServices(Collections.singleton(r));
+    resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(1, resp.size());
     Assert.assertEquals(s2.getName(), resp.iterator().next().getServiceName());
 
     try {
       r = new ServiceRequest(c2.getClusterName(), s1.getName(), null, null);
-      controller.getServices(Collections.singleton(r));
+      ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
       fail("Expected failure for invalid service");
     } catch (Exception e) {
       // Expected
     }
 
     r = new ServiceRequest(c1.getClusterName(), null, null, "INSTALLED");
-    resp = controller.getServices(Collections.singleton(r));
+    resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(2, resp.size());
 
     r = new ServiceRequest(c2.getClusterName(), null, null, "INIT");
-    resp = controller.getServices(Collections.singleton(r));
+    resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(1, resp.size());
 
     ServiceRequest r1, r2, r3;
@@ -1850,7 +1871,7 @@ public class AmbariManagementControllerTest {
 
     Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
     reqs.addAll(Arrays.asList(r1, r2, r3));
-    resp = controller.getServices(reqs);
+    resp = ServiceResourceProviderTest.getServices(controller, reqs);
     Assert.assertEquals(3, resp.size());
 
   }
@@ -2348,7 +2369,7 @@ public class AmbariManagementControllerTest {
           null, State.INSTALLING.toString());
       reqs.clear();
       reqs.add(r);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected fail for invalid state transition");
     } catch (Exception e) {
       // Expected
@@ -2358,7 +2379,7 @@ public class AmbariManagementControllerTest {
         null, State.INSTALLED.toString());
     reqs.clear();
     reqs.add(r);
-    RequestStatusResponse trackAction = controller.updateServices(reqs,
+    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, reqs,
         mapRequestProps, true, false);
     Assert.assertNull(trackAction);
   }
@@ -2405,7 +2426,7 @@ public class AmbariManagementControllerTest {
           State.INSTALLED.toString());
       reqs.add(req1);
       reqs.add(req2);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for multi cluster update");
     } catch (Exception e) {
       // Expected
@@ -2419,7 +2440,7 @@ public class AmbariManagementControllerTest {
           State.INSTALLED.toString());
       reqs.add(req1);
       reqs.add(req2);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for dups services");
     } catch (Exception e) {
       // Expected
@@ -2436,7 +2457,7 @@ public class AmbariManagementControllerTest {
           State.STARTED.toString());
       reqs.add(req1);
       reqs.add(req2);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for different states");
     } catch (Exception e) {
       // Expected
@@ -2557,7 +2578,7 @@ public class AmbariManagementControllerTest {
       req1 = new ServiceRequest(clusterName, serviceName1, null,
           State.STARTED.toString());
       reqs.add(req1);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for invalid state update");
     } catch (Exception e) {
       // Expected
@@ -2584,7 +2605,7 @@ public class AmbariManagementControllerTest {
       req1 = new ServiceRequest(clusterName, serviceName1, null,
           State.STARTED.toString());
       reqs.add(req1);
-      controller.updateServices(reqs, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for invalid state update");
     } catch (Exception e) {
       // Expected
@@ -2613,7 +2634,7 @@ public class AmbariManagementControllerTest {
         State.STARTED.toString());
     reqs.add(req1);
     reqs.add(req2);
-    RequestStatusResponse trackAction = controller.updateServices(reqs,
+    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, reqs,
       mapRequestProps, true, false);
 
     Assert.assertEquals(State.STARTED, s1.getDesiredState());
@@ -2698,7 +2719,7 @@ public class AmbariManagementControllerTest {
         State.STARTED.toString());
     reqs.add(req1);
     reqs.add(req2);
-    trackAction = controller.updateServices(reqs, mapRequestProps, true,
+    trackAction = ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true,
       false);
     Assert.assertNull(trackAction);
 
@@ -3749,7 +3770,7 @@ public class AmbariManagementControllerTest {
     sReqs.clear();
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
         "INSTALLED"));
-    RequestStatusResponse trackAction = controller.updateServices(sReqs,
+    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, sReqs,
       mapRequestProps, true, false);
     List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
     for (ExecutionCommandWrapper cmd : stages.get(0)
@@ -3842,7 +3863,7 @@ public class AmbariManagementControllerTest {
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r);
 
-    controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
             clusters.getCluster(clusterName).getService(serviceName)
                     .getDesiredState());
@@ -3861,7 +3882,7 @@ public class AmbariManagementControllerTest {
             State.STARTED.toString());
     requests.clear();
     requests.add(r);
-    controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
 
     // manually change live state to started as no running action manager
     for (ServiceComponent sc :
@@ -3961,7 +3982,7 @@ public class AmbariManagementControllerTest {
     sReqs.clear();
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
             null));
-    Assert.assertNull(controller.updateServices(sReqs, mapRequestProps, true, false));
+    Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     Assert.assertEquals(3, s.getDesiredConfigs().size());
     Assert.assertEquals(3, sc1.getDesiredConfigs().size());
@@ -4196,7 +4217,7 @@ public class AmbariManagementControllerTest {
     sReqs.clear();
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
         null));
-    Assert.assertNull(controller.updateServices(sReqs, mapRequestProps, true, false));
+    Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     Assert.assertEquals(3, s.getDesiredConfigs().size());
     Assert.assertEquals(3, sc1.getDesiredConfigs().size());
@@ -4348,7 +4369,7 @@ public class AmbariManagementControllerTest {
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r);
 
-    controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
       clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -4459,7 +4480,7 @@ public class AmbariManagementControllerTest {
     sReqs.clear();
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
       null));
-    Assert.assertNull(controller.updateServices(sReqs, mapRequestProps, true, false));
+    Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
     Assert.assertEquals(2, s.getDesiredConfigs().size());
 
     // Reconfigure S Level
@@ -4469,7 +4490,7 @@ public class AmbariManagementControllerTest {
     sReqs.clear();
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
       null));
-    Assert.assertNull(controller.updateServices(sReqs, mapRequestProps, true, false));
+    Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     entityManager.clear();
 
@@ -4492,8 +4513,6 @@ public class AmbariManagementControllerTest {
     String componentName4 = "JOBTRACKER";
     String componentName5 = "TASKTRACKER";
     String componentName6 = "MAPREDUCE_CLIENT";
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
 
     createService(clusterName, serviceName1, null);
     createService(clusterName, serviceName2, null);
@@ -4674,9 +4693,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
@@ -4805,7 +4821,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
 
     RequestStatusResponse trackAction =
-        controller.updateServices(requests, mapRequestProps, true, false);
+        ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
         clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -4839,7 +4855,7 @@ public class AmbariManagementControllerTest {
     requests.clear();
     requests.add(r);
 
-    trackAction = controller.updateServices(requests, mapRequestProps, true, false);
+    trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertNotNull(trackAction);
     Assert.assertEquals(State.INSTALLED,
         clusters.getCluster(clusterName).getService(serviceName)
@@ -5324,7 +5340,7 @@ public class AmbariManagementControllerTest {
       clusterName, serviceName, null, State.STARTED.name());
     Set<ServiceRequest> setReqs = new HashSet<ServiceRequest>();
     setReqs.add(sr);
-    RequestStatusResponse resp = controller.updateServices(
+    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller, 
       setReqs, Collections.<String, String>emptyMap(), false, true);
 
     Assert.assertNotNull(resp);
@@ -5434,9 +5450,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
@@ -5535,9 +5548,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
@@ -5620,9 +5630,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
     
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
@@ -5676,7 +5683,7 @@ public class AmbariManagementControllerTest {
       put("hdfs-site", "version1");
     }};
     ServiceRequest sr = new ServiceRequest(clusterName, serviceName, configVersions, null);
-    controller.updateServices(Collections.singleton(sr), new HashMap<String,String>(), false, false);
+    ServiceResourceProviderTest.updateServices(controller, Collections.singleton(sr), new HashMap<String,String>(), false, false);
 
     // Install
     installService(clusterName, serviceName, false, false);
@@ -5742,7 +5749,7 @@ public class AmbariManagementControllerTest {
     requests.add(r);
 
     RequestStatusResponse trackAction =
-      controller.updateServices(requests, mapRequestProps, true, false);
+      ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
       clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -5972,7 +5979,9 @@ public class AmbariManagementControllerTest {
     Assert.assertTrue(c.getCurrentStackVersion().equals(newStackId));
   }
 
-  //@Test - disabled as upgrade feature is disabled
+  // disabled as upgrade feature is disabled
+  @Ignore
+  @Test
   public void testUpdateClusterVersionBasic() throws AmbariException {
     String clusterName = "foo1";
     String serviceName = "MAPREDUCE";
@@ -6144,7 +6153,9 @@ public class AmbariManagementControllerTest {
     }
   }
 
-  //@Test - disabled as cluster upgrade feature is disabled
+  // disabled as cluster upgrade feature is disabled
+  @Ignore
+  @Test
   public void testUpdateClusterVersionCombinations() throws AmbariException {
     String clusterName = "foo1";
     String pigServiceName = "PIG";
@@ -6513,8 +6524,7 @@ public class AmbariManagementControllerTest {
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r);
 
-    RequestStatusResponse trackAction =
-      controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
       clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -6533,7 +6543,7 @@ public class AmbariManagementControllerTest {
       State.STARTED.toString());
     requests.clear();
     requests.add(r);
-    trackAction = controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
 
     // manually change live state to started as no running action manager
     for (ServiceComponent sc :
@@ -6578,7 +6588,7 @@ public class AmbariManagementControllerTest {
       State.INSTALLED.toString());
     requests.clear();
     requests.add(r);
-    controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
 
     for (ServiceComponent sc :
       clusters.getCluster(clusterName).getService(serviceName)
@@ -6785,8 +6795,7 @@ public class AmbariManagementControllerTest {
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r);
 
-    RequestStatusResponse trackAction =
-      controller.updateServices(requests, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
     Assert.assertEquals(State.INSTALLED,
       clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
@@ -6826,9 +6835,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName1, componentName1, State.INIT);
     createServiceComponent(clusterName, serviceName1, componentName2, State.INIT);
     createServiceComponent(clusterName, serviceName1, componentName3, State.INIT);
@@ -6875,7 +6881,7 @@ public class AmbariManagementControllerTest {
     // an UNKOWN failure will throw an exception
     ServiceRequest req = new ServiceRequest(clusterName, serviceName1, null,
         State.INSTALLED.toString());
-    controller.updateServices(Collections.singleton(req), Collections.<String, String>emptyMap(), true, false);
+    ServiceResourceProviderTest.updateServices(controller, Collections.singleton(req), Collections.<String, String>emptyMap(), true, false);
   }
 
   @Test
@@ -7112,9 +7118,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
     createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
@@ -7253,7 +7256,7 @@ public class AmbariManagementControllerTest {
     try {
       controller.getRootServices(Collections.singleton(invalidRequest));
     } catch (ObjectNotFoundException e) {
-      Assert.assertTrue(e instanceof ObjectNotFoundException);
+      // do nothing
     }
   }
   
@@ -7278,7 +7281,7 @@ public class AmbariManagementControllerTest {
     try {
       controller.getRootServiceComponents(Collections.singleton(invalidRequest));
     } catch (ObjectNotFoundException e) {
-      Assert.assertTrue(e instanceof ObjectNotFoundException);
+      // do nothing
     }
   }
   
@@ -7297,9 +7300,6 @@ public class AmbariManagementControllerTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
     createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
@@ -7400,6 +7400,822 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("value21", mergedConfig.get("name2"));
     Assert.assertEquals("value41", mergedConfig.get("name4"));
   }
+
+  @Test
+  public void testApplyConfigurationWithTheSameTag() {
+    Injector injector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Properties properties = new Properties();
+        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
+        properties.setProperty(Configuration.METADETA_DIR_PATH,
+            "src/main/resources/stacks");
+        properties.setProperty(Configuration.SERVER_VERSION_FILE,
+            "target/version");
+        properties.setProperty(Configuration.OS_VERSION_KEY,
+            "centos6");
+        try {
+          install(new ControllerModule(properties));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    try {
+      String tag = "version1";
+      String type = "core-site";
+      AmbariException exception = null;
+      try {
+        AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+        Clusters clusters = injector.getInstance(Clusters.class);
+        Gson gson = new Gson();
+
+        clusters.addHost("host1");
+        clusters.addHost("host2");
+        clusters.addHost("host3");
+        Host host = clusters.getHost("host1");
+        host.setOsType("centos6");
+        host.persist();
+        host = clusters.getHost("host2");
+        host.setOsType("centos6");
+        host.persist();
+        host = clusters.getHost("host3");
+        host.setOsType("centos6");
+        host.persist();
+
+        ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
+        amc.createCluster(clusterRequest);
+
+        Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+        serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+
+        ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+        Type confType = new TypeToken<Map<String, String>>() {
+        }.getType();
+
+        ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", type, tag,
+            gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType));
+        amc.createConfiguration(configurationRequest);
+
+        amc.createConfiguration(configurationRequest);
+      } catch (AmbariException e) {
+        exception = e;
+      }
+
+      assertNotNull(exception);
+      String exceptionMessage = MessageFormat.format("Configuration with tag ''{0}'' exists for ''{1}''",
+          tag, type);
+      org.junit.Assert.assertEquals(exceptionMessage, exception.getMessage());
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }
+  }
+
+  @Test
+  public void testDeleteClusterCreateHost() throws Exception {
+
+    Injector injector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Properties properties = new Properties();
+        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
+
+        properties.setProperty(Configuration.METADETA_DIR_PATH,
+            "src/test/resources/stacks");
+        properties.setProperty(Configuration.SERVER_VERSION_FILE,
+            "../version");
+        properties.setProperty(Configuration.OS_VERSION_KEY, "centos6");
+
+        try {
+          install(new ControllerModule(properties));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    injector.getInstance(GuiceJpaInitializer.class);
+
+
+    String STACK_ID = "HDP-2.0.1";
+    String CLUSTER_NAME = "c1";
+    String HOST1 = "h1";
+    String HOST2 = "h2";
+
+    try {
+      Clusters clusters = injector.getInstance(Clusters.class);
+
+      clusters.addHost(HOST1);
+      Host host = clusters.getHost(HOST1);
+      host.setOsType("centos6");
+      host.persist();
+
+      clusters.addHost(HOST2);
+      host = clusters.getHost(HOST2);
+      host.setOsType("centos6");
+      host.persist();
+
+      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+
+      ClusterRequest cr = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
+      amc.createCluster(cr);
+
+      ConfigurationRequest configRequest = new ConfigurationRequest(CLUSTER_NAME, "global", "version1",
+          new HashMap<String, String>() {{ put("a", "b"); }});
+      cr.setDesiredConfig(configRequest);
+      amc.updateClusters(Collections.singleton(cr), new HashMap<String, String>());
+
+      // add some hosts
+      Set<HostRequest> hrs = new HashSet<HostRequest>();
+      hrs.add(new HostRequest(HOST1, CLUSTER_NAME, null));
+      amc.createHosts(hrs);
+
+      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
+
+      ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "HDFS_CLIENT", null, null));
+
+      amc.createComponents(serviceComponentRequests);
+
+      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HDFS_CLIENT", HOST1, null, null));
+
+      amc.createHostComponents(componentHostRequests);
+
+      ActionRequest ar = new ActionRequest(CLUSTER_NAME, "HDFS", Role.HDFS_SERVICE_CHECK.name(), new HashMap<String, String>());
+      amc.createActions(Collections.singleton(ar), null);
+
+      // change mind, delete the cluster
+      amc.deleteCluster(cr);
+
+      assertNotNull(clusters.getHost(HOST1));
+      assertNotNull(clusters.getHost(HOST2));
+
+      HostDAO dao = injector.getInstance(HostDAO.class);
+
+      assertNotNull(dao.findByName(HOST1));
+      assertNotNull(dao.findByName(HOST2));
+
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }
+  }
+
+  @Test
+  public void testMaintenanceAndDeleteStates() throws Exception {
+    Map<String,String> mapRequestProps = new HashMap<String, String>();
+    Injector injector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Properties properties = new Properties();
+        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
+
+        properties.setProperty(Configuration.METADETA_DIR_PATH,
+            "src/main/resources/stacks");
+        properties.setProperty(Configuration.SERVER_VERSION_FILE,
+            "target/version");
+        properties.setProperty(Configuration.OS_VERSION_KEY,
+            "centos5");
+        try {
+          install(new ControllerModule(properties));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    try {
+      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+      Clusters clusters = injector.getInstance(Clusters.class);
+      Gson gson = new Gson();
+
+      clusters.addHost("host1");
+      clusters.addHost("host2");
+      clusters.addHost("host3");
+      Host host = clusters.getHost("host1");
+      host.setOsType("centos5");
+      host.persist();
+      host = clusters.getHost("host2");
+      host.setOsType("centos5");
+      host.persist();
+      host = clusters.getHost("host3");
+      host.setOsType("centos5");
+      host.persist();
+
+      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
+      amc.createCluster(clusterRequest);
+
+      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+
+      ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+      Type confType = new TypeToken<Map<String, String>>() {
+      }.getType();
+
+      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
+          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+
+      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
+          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+
+      configurationRequest = new ConfigurationRequest("c1", "global", "version1",
+          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+
+
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS",
+          gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType)
+          , null));
+
+      ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
+
+      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
+
+      amc.createComponents(serviceComponentRequests);
+
+      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
+      hostRequests.add(new HostRequest("host1", "c1", null));
+      hostRequests.add(new HostRequest("host2", "c1", null));
+      hostRequests.add(new HostRequest("host3", "c1", null));
+
+      amc.createHosts(hostRequests);
+
+      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
+
+
+      amc.createHostComponents(componentHostRequests);
+
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+      ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
+
+      Cluster cluster = clusters.getCluster("c1");
+      Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(1, namenodes.size());
+
+      ServiceComponentHost componentHost = namenodes.get("host1");
+
+      Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+      }
+      hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+      }
+      hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+      }
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
+
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+
+      org.junit.Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED"));
+
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+
+      org.junit.Assert.assertEquals(State.INSTALLED, componentHost.getState());
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
+
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+
+      org.junit.Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, null));
+
+      amc.createHostComponents(componentHostRequests);
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED"));
+
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(2, namenodes.size());
+
+      componentHost = namenodes.get("host2");
+      componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+      componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
+
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
+
+      RequestStatusResponse response = ServiceResourceProviderTest.updateServices(amc, serviceRequests,
+          mapRequestProps, true, false);
+      for (ShortTaskStatus shortTaskStatus : response.getTasks()) {
+        assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole()));
+      }
+
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+
+      amc.deleteHostComponents(componentHostRequests);
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(1, namenodes.size());
+
+      // testing the behavior for runSmokeTest flag
+      // piggybacking on this test to avoid setting up the mock cluster
+      testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests);
+
+      // should be able to add the host component back
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      amc.createHostComponents(componentHostRequests);
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(2, namenodes.size());
+
+
+      // make unknown
+      ServiceComponentHost sch = null;
+      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
+        if (tmp.getServiceComponentName().equals("DATANODE")) {
+          tmp.setState(State.UNKNOWN);
+          sch = tmp;
+        }
+      }
+      assertNotNull(sch);
+
+      // make maintenance
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE"));
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, false);
+      org.junit.Assert.assertEquals(State.MAINTENANCE, sch.getState());
+
+      // confirm delete
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      amc.deleteHostComponents(componentHostRequests);
+
+      sch = null;
+      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
+        if (tmp.getServiceComponentName().equals("DATANODE")) {
+          sch = tmp;
+        }
+      }
+      org.junit.Assert.assertNull(sch);
+
+      /*
+      *Test remove service
+      */
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+      ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", null, null, null));
+      org.junit.Assert.assertEquals(1, ServiceResourceProviderTest.getServices(amc, serviceRequests).size());
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+      ServiceResourceProviderTest.deleteServices(amc, serviceRequests);
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", null, null, null));
+      org.junit.Assert.assertEquals(0, ServiceResourceProviderTest.getServices(amc, serviceRequests).size());
+
+      /*
+      *Test add service again
+      */
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+
+      ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+      org.junit.Assert.assertEquals(1, ServiceResourceProviderTest.getServices(amc, serviceRequests).size());
+      //Create new configs
+      configurationRequest = new ConfigurationRequest("c1", "core-site", "version2",
+          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2",
+          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+      configurationRequest = new ConfigurationRequest("c1", "global", "version2",
+          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+      //Add configs to service
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS",
+          gson.<Map<String, String>>fromJson("{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType)
+          , null));
+      ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
+      //Create service components
+      serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
+      amc.createComponents(serviceComponentRequests);
+
+      //Create ServiceComponentHosts
+      componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
+      amc.createHostComponents(componentHostRequests);
+
+
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(1, namenodes.size());
+      Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(3, datanodes.size());
+      Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
+      org.junit.Assert.assertEquals(1, namenodes2.size());
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }
+  }
+
+  /**
+   * Verifies that transitioning the installed HDFS, MAPREDUCE2 and YARN
+   * services to STARTED schedules exactly one smoke-test (service-check)
+   * task per service in the resulting request.
+   */
+  @Test
+  public void testScheduleSmokeTest() throws Exception {
+
+    final String HOST1 = "host1";
+    final String OS_TYPE = "centos5";
+    final String STACK_ID = "HDP-2.0.1";
+    final String CLUSTER_NAME = "c1";
+    final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK";
+    final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK";
+    final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK";
+
+    Map<String,String> mapRequestProps = Collections.emptyMap();
+    // Stand up a real controller backed by in-memory persistence so no
+    // external database or running agents are required.
+    Injector injector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Properties properties = new Properties();
+        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
+
+        properties.setProperty(Configuration.METADETA_DIR_PATH,
+            "src/test/resources/stacks");
+        properties.setProperty(Configuration.SERVER_VERSION_FILE,
+            "../version");
+        properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE);
+        try {
+          install(new ControllerModule(properties));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    try {
+      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+      Clusters clusters = injector.getInstance(Clusters.class);
+
+      // Register the host (with its OS type) before creating the cluster.
+      clusters.addHost(HOST1);
+      Host host = clusters.getHost(HOST1);
+      host.setOsType(OS_TYPE);
+      host.persist();
+
+      ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
+      amc.createCluster(clusterRequest);
+
+      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
+
+      ServiceResourceProviderTest.createServices(amc, serviceRequests);
+
+      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
+
+      amc.createComponents(serviceComponentRequests);
+
+      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
+      hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null));
+
+      amc.createHosts(hostRequests);
+
+      // All components are co-located on the single test host.
+      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
+
+      amc.createHostComponents(componentHostRequests);
+
+      //Install services
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.INSTALLED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.INSTALLED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.INSTALLED.name()));
+
+      ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
+
+      Cluster cluster = clusters.getCluster(CLUSTER_NAME);
+
+      // No agents run in this test, so manually replay the install/success
+      // events on every host component to drive them into the INSTALLED
+      // state; otherwise the services could not be started below.
+      for (String serviceName : cluster.getServices().keySet() ) {
+
+        for(String componentName: cluster.getService(serviceName).getServiceComponents().keySet()) {
+
+          Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName).getServiceComponent(componentName).getServiceComponentHosts();
+
+          for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) {
+            ServiceComponentHost cHost = entry.getValue();
+            cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), STACK_ID));
+            cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+          }
+        }
+      }
+
+      //Start services
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.STARTED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.STARTED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.STARTED.name()));
+
+      RequestStatusResponse response = ServiceResourceProviderTest.updateServices(amc, serviceRequests,
+          mapRequestProps, true, false);
+
+      // Filter the generated tasks by service-check role: each started
+      // service must have scheduled exactly one smoke test.
+      Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(HDFS_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for HDFS
+      org.junit.Assert.assertEquals(1, hdfsSmokeTasks.size());
+
+      Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for MAPREDUCE2
+      org.junit.Assert.assertEquals(1, mapreduce2SmokeTasks.size());
+
+      Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(YARN_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for YARN
+      org.junit.Assert.assertEquals(1, yarnSmokeTasks.size());
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }
+  }
+
+  /**
+   * Happy-path getServices: a single request resolves its cluster and
+   * service through mocked Clusters, and the service's converted response
+   * is the sole element of the returned set.
+   */
+  @Test
+  public void testGetServices2() throws Exception {
+    // member state mocks
+    Injector injector = createStrictMock(Injector.class);
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    Clusters clusters = createNiceMock(Clusters.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service = createNiceMock(Service.class);
+    ServiceResponse response = createNiceMock(ServiceResponse.class);
+
+    // requests
+    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
+
+    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
+    setRequests.add(request1);
+
+    // expectations
+    // constructor init
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+
+    // getServices
+    expect(clusters.getCluster("cluster1")).andReturn(cluster);
+    expect(cluster.getService("service1")).andReturn(service);
+
+    expect(service.convertToResponse()).andReturn(response);
+    // replay mocks
+    replay(injector, clusters, cluster, service, response);
+
+    //test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    Set<ServiceResponse> setResponses = ServiceResourceProviderTest.getServices(controller, setRequests);
+
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    assertEquals(1, setResponses.size());
+    assertTrue(setResponses.contains(response));
+
+    verify(injector, clusters, cluster, service, response);
+  }
+
+  /**
+   * Ensure that ServiceNotFoundException is propagated in case where there is a single request.
+   */
+  @Test
+  public void testGetServices___ServiceNotFoundException() throws Exception {
+    // member state mocks
+    Injector injector = createStrictMock(Injector.class);
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    Clusters clusters = createNiceMock(Clusters.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+
+    // requests
+    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
+    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
+    setRequests.add(request1);
+
+    // expectations
+    // constructor init
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+
+    // getServices
+    expect(clusters.getCluster("cluster1")).andReturn(cluster);
+    // NOTE(review): "custer1" looks like a typo for "cluster1" in the mocked
+    // exception's arguments; harmless to this assertion, but worth fixing.
+    expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("custer1", "service1"));
+
+    // replay mocks
+    replay(injector, clusters, cluster);
+
+    //test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+
+    // assert that exception is thrown in case where there is a single request
+    try {
+      ServiceResourceProviderTest.getServices(controller, setRequests);
+      fail("expected ServiceNotFoundException");
+    } catch (ServiceNotFoundException e) {
+      // expected
+    }
+
+    assertSame(controller, controllerCapture.getValue());
+    verify(injector, clusters, cluster);
+  }
+
+  /**
+   * Ensure that ServiceNotFoundException is handled where there are multiple requests as would be the
+   * case when an OR predicate is provided in the query.
+   */
+  @Test
+  public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exception {
+    // member state mocks
+    Injector injector = createStrictMock(Injector.class);
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    Clusters clusters = createNiceMock(Clusters.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service1 = createNiceMock(Service.class);
+    Service service2 = createNiceMock(Service.class);
+    ServiceResponse response = createNiceMock(ServiceResponse.class);
+    ServiceResponse response2 = createNiceMock(ServiceResponse.class);
+
+    // requests
+    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
+    ServiceRequest request2 = new ServiceRequest("cluster1", "service2", Collections.<String, String>emptyMap(), null);
+    ServiceRequest request3 = new ServiceRequest("cluster1", "service3", Collections.<String, String>emptyMap(), null);
+    ServiceRequest request4 = new ServiceRequest("cluster1", "service4", Collections.<String, String>emptyMap(), null);
+
+    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
+    setRequests.add(request1);
+    setRequests.add(request2);
+    setRequests.add(request3);
+    setRequests.add(request4);
+
+    // expectations
+    // constructor init
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+
+    // getServices
+    // Two of the four requested services are missing; getServices should
+    // skip them rather than aborting the whole multi-request query.
+    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
+    expect(cluster.getService("service1")).andReturn(service1);
+    expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
+    expect(cluster.getService("service3")).andThrow(new ServiceNotFoundException("cluster1", "service3"));
+    expect(cluster.getService("service4")).andReturn(service2);
+
+    expect(service1.convertToResponse()).andReturn(response);
+    expect(service2.convertToResponse()).andReturn(response2);
+    // replay mocks
+    replay(injector, clusters, cluster, service1, service2, response, response2);
+
+    //test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    Set<ServiceResponse> setResponses = ServiceResourceProviderTest.getServices(controller, setRequests);
+
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    // only the two existing services produce responses
+    assertEquals(2, setResponses.size());
+    assertTrue(setResponses.contains(response));
+    assertTrue(setResponses.contains(response2));
+
+    verify(injector, clusters, cluster, service1, service2, response, response2);
+  }
+
+  /**
+   * Exercises the run_smoke_test flag on a service restart: an
+   * HDFS_SERVICE_CHECK task must be scheduled only when the flag is true.
+   * Invoked from another test that has already set up a live cluster.
+   */
+  private void testRunSmokeTestFlag(Map<String, String> mapRequestProps,
+                                    AmbariManagementController amc,
+                                    Set<ServiceRequest> serviceRequests)
+      throws AmbariException {
+    RequestStatusResponse response; // response of the most recent service update
+
+    //Stopping HDFS service
+    serviceRequests.clear();
+    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+    ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, false,
+        false);
+
+    //Starting HDFS service. No run_smoke_test flag is set, smoke
+    // test(HDFS_SERVICE_CHECK) won't run
+    boolean runSmokeTest = false;
+    serviceRequests.clear();
+    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
+    response = ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps,
+        runSmokeTest, false);
+
+    // no HDFS_SERVICE_CHECK task should appear among the scheduled tasks
+    List<ShortTaskStatus> taskStatuses = response.getTasks();
+    boolean smokeTestRequired = false;
+    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
+      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
+        smokeTestRequired= true;
+      }
+    }
+    assertFalse(smokeTestRequired);
+
+    //Stopping HDFS service
+    serviceRequests.clear();
+    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+    ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, false,
+        false);
+
+    //Starting HDFS service again.
+    //run_smoke_test flag is set, smoke test will be run
+    runSmokeTest = true;
+    serviceRequests.clear();
+    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
+    response = ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps,
+        runSmokeTest, false);
+
+    // this time the HDFS_SERVICE_CHECK task must be present
+    taskStatuses = response.getTasks();
+    smokeTestRequired = false;
+    for (ShortTaskStatus shortTaskStatus : taskStatuses) {
+      if (shortTaskStatus.getRole().equals(Role.HDFS_SERVICE_CHECK.toString())) {
+        smokeTestRequired= true;
+      }
+    }
+    assertTrue(smokeTestRequired);
+  }
+
+  /**
+   * Commons-Collections predicate that matches {@code ShortTaskStatus}
+   * instances whose role equals the given role name; used to count
+   * service-check tasks in a request's task list.
+   */
+  private class RolePredicate implements org.apache.commons.collections.Predicate {
+
+    // role name compared against each task's role
+    private String role;
+
+    public RolePredicate(String role) {
+      this.role = role;
+    }
+
+    @Override
+    public boolean evaluate(Object obj) {
+      ShortTaskStatus task = (ShortTaskStatus)obj;
+      return task.getRole().equals(role);
+    }
+  }
+
 }
 
   

+ 3 - 7
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java

@@ -46,7 +46,6 @@ public class JMXHostProviderTest {
   private Injector injector;
   private Clusters clusters;
   static AmbariManagementController controller;
-  private AmbariMetaInfo ambariMetaInfo;
   private static final String NAMENODE_PORT = "dfs.http.address";
   private static final String DATANODE_PORT = "dfs.datanode.http.address";
 
@@ -56,7 +55,7 @@ public class JMXHostProviderTest {
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
     controller = injector.getInstance(AmbariManagementController.class);
-    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
     ambariMetaInfo.init();
   }
 
@@ -75,7 +74,7 @@ public class JMXHostProviderTest {
       dStateStr);
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
     requests.add(r1);
-    controller.createServices(requests);
+    ServiceResourceProviderTest.createServices(controller, requests);
   }
 
   private void createServiceComponent(String clusterName,
@@ -164,7 +163,7 @@ public class JMXHostProviderTest {
     configVersions.put("hdfs-site", "version1");
     sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
       null));
-    controller.updateServices(sReqs, mapRequestProps, true, false);
+    ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false);
   }
 
   private void createConfigs() throws AmbariException {
@@ -178,9 +177,6 @@ public class JMXHostProviderTest {
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
 
-    Map<String, String> mapRequestProps = new HashMap<String, String>();
-    mapRequestProps.put("context", "Called from a test");
-
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,

+ 237 - 121
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java

@@ -18,7 +18,8 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ServiceRequest;
@@ -29,10 +30,19 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.*;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.easymock.Capture;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -42,14 +52,13 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNull;
+import static org.easymock.EasyMock.anyBoolean;
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.verify;
 
 /**
@@ -59,21 +68,31 @@ public class ServiceResourceProviderTest {
 
   @Test
   public void testCreateResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
+    AmbariManagementController managementController = createNiceMock(AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service = createNiceMock(Service.class);
+    StackId stackId = createNiceMock(StackId.class);
+    ServiceFactory serviceFactory = createNiceMock(ServiceFactory.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
 
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    expect(managementController.getClusters()).andReturn(clusters);
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo);
+    expect(managementController.getServiceFactory()).andReturn(serviceFactory);
+
+    expect(serviceFactory.createNew(cluster, "Service100")).andReturn(service);
+
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+
+    expect(cluster.getService("Service100")).andReturn(null);
+    expect(cluster.getDesiredStackVersion()).andReturn(stackId);
 
-    managementController.createServices(AbstractResourceProviderTest.Matcher.getServiceRequestSet("Cluster100", "Service100", null, "DEPLOYED"));
+    expect(ambariMetaInfo.isValidService( (String) anyObject(), (String) anyObject(), (String) anyObject())).andReturn(true);
 
     // replay
-    replay(managementController, response);
+    replay(managementController, clusters, cluster, service, ambariMetaInfo, stackId, serviceFactory);
 
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
+    ResourceProvider provider = getServiceProvider(managementController);
 
     // add the property map to a set for the request.  add more maps for multiple creates
     Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
@@ -84,7 +103,7 @@ public class ServiceResourceProviderTest {
     // add properties to the request map
     properties.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     properties.put(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "DEPLOYED");
+    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INIT");
 
     propertySet.add(properties);
 
@@ -94,43 +113,82 @@ public class ServiceResourceProviderTest {
     provider.createResources(request);
 
     // verify
-    verify(managementController, response);
+    verify(managementController, clusters, cluster, service, ambariMetaInfo, stackId, serviceFactory);
   }
 
   @Test
   public void testGetResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ServiceResponse> allResponse = new HashSet<ServiceResponse>();
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service100", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service101", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service103", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service104", null, "HDP-0.1", "DEPLOYED"));
-
-    Set<ServiceResponse> nameResponse = new HashSet<ServiceResponse>();
-    nameResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-
-    Set<ServiceResponse> stateResponse = new HashSet<ServiceResponse>();
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service100", null, "HDP-0.1", "DEPLOYED"));
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service104", null, "HDP-0.1", "DEPLOYED"));
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service0 = createNiceMock(Service.class);
+    Service service1 = createNiceMock(Service.class);
+    Service service2 = createNiceMock(Service.class);
+    Service service3 = createNiceMock(Service.class);
+    Service service4 = createNiceMock(Service.class);
+    ServiceResponse serviceResponse0 = createNiceMock(ServiceResponse.class);
+    ServiceResponse serviceResponse1 = createNiceMock(ServiceResponse.class);
+    ServiceResponse serviceResponse2 = createNiceMock(ServiceResponse.class);
+    ServiceResponse serviceResponse3 = createNiceMock(ServiceResponse.class);
+    ServiceResponse serviceResponse4 = createNiceMock(ServiceResponse.class);
+
+    StackId stackId = createNiceMock(StackId.class);
+    ServiceFactory serviceFactory = createNiceMock(ServiceFactory.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
+
+    Map<String, Service> allResponseMap = new HashMap<String, Service>();
+    allResponseMap.put("Service100", service0);
+    allResponseMap.put("Service101", service1);
+    allResponseMap.put("Service102", service2);
+    allResponseMap.put("Service103", service3);
+    allResponseMap.put("Service104", service4);
 
     // set expectations
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(allResponse).once();
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(stateResponse).once();
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
+
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+
+    expect(cluster.getServices()).andReturn(allResponseMap).anyTimes();
+    expect(cluster.getService("Service102")).andReturn(service2);
+
+    expect(service0.convertToResponse()).andReturn(serviceResponse0).anyTimes();
+    expect(service1.convertToResponse()).andReturn(serviceResponse1).anyTimes();
+    expect(service2.convertToResponse()).andReturn(serviceResponse2).anyTimes();
+    expect(service3.convertToResponse()).andReturn(serviceResponse3).anyTimes();
+    expect(service4.convertToResponse()).andReturn(serviceResponse4).anyTimes();
+
+    expect(service0.getName()).andReturn("Service100").anyTimes();
+    expect(service1.getName()).andReturn("Service101").anyTimes();
+    expect(service2.getName()).andReturn("Service102").anyTimes();
+    expect(service3.getName()).andReturn("Service103").anyTimes();
+    expect(service4.getName()).andReturn("Service104").anyTimes();
+
+    expect(service0.getDesiredState()).andReturn(State.INIT);
+    expect(service1.getDesiredState()).andReturn(State.INSTALLED);
+    expect(service2.getDesiredState()).andReturn(State.INIT);
+    expect(service3.getDesiredState()).andReturn(State.INSTALLED);
+    expect(service4.getDesiredState()).andReturn(State.INIT);
+
+    expect(serviceResponse0.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse0.getServiceName()).andReturn("Service100").anyTimes();
+    expect(serviceResponse1.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse1.getServiceName()).andReturn("Service101").anyTimes();
+    expect(serviceResponse2.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse2.getServiceName()).andReturn("Service102").anyTimes();
+    expect(serviceResponse3.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse3.getServiceName()).andReturn("Service103").anyTimes();
+    expect(serviceResponse4.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse4.getServiceName()).andReturn("Service104").anyTimes();
 
     // replay
-    replay(managementController);
+    replay(managementController, clusters, cluster,
+        service0, service1, service2, service3, service4,
+        serviceResponse0, serviceResponse1, serviceResponse2, serviceResponse3, serviceResponse4,
+        ambariMetaInfo, stackId, serviceFactory);
 
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
+    ResourceProvider provider = getServiceProvider(managementController);
 
     Set<String> propertyIds = new HashSet<String>();
 
@@ -138,9 +196,9 @@ public class ServiceResourceProviderTest {
     propertyIds.add(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID);
 
     // create the request
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    // get all ... no predicate
-    Set<Resource> resources = provider.getResources(request, null);
+    Predicate predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").toPredicate();
+    Request request = PropertyHelper.getReadRequest("ServiceInfo");
+    Set<Resource> resources = provider.getResources(request, predicate);
 
     Assert.assertEquals(5, resources.size());
     Set<String> names = new HashSet<String>();
@@ -150,20 +208,22 @@ public class ServiceResourceProviderTest {
       names.add((String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
     }
     // Make sure that all of the response objects got moved into resources
-    for (ServiceResponse serviceResponse : allResponse ) {
-      Assert.assertTrue(names.contains(serviceResponse.getServiceName()));
+    for (Service service : allResponseMap.values() ) {
+      Assert.assertTrue(names.contains(service.getName()));
     }
 
     // get service named Service102
-    Predicate predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service102").toPredicate();
+    predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and().
+        property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service102").toPredicate();
     request = PropertyHelper.getReadRequest("ServiceInfo");
     resources = provider.getResources(request, predicate);
 
     Assert.assertEquals(1, resources.size());
     Assert.assertEquals("Service102", resources.iterator().next().getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
 
-    // get services where state == "DEPLOYED"
-    predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID).equals("DEPLOYED").toPredicate();
+    // get services where state == "INIT"
+    predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and().
+        property(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID).equals("INIT").toPredicate();
     request = PropertyHelper.getReadRequest(propertyIds);
     resources = provider.getResources(request, predicate);
 
@@ -174,43 +234,60 @@ public class ServiceResourceProviderTest {
       Assert.assertEquals("Cluster100", clusterName);
       names.add((String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
     }
-    // Make sure that all of the response objects got moved into resources
-    for (ServiceResponse serviceResponse : stateResponse ) {
-      Assert.assertTrue(names.contains(serviceResponse.getServiceName()));
-    }
 
     // verify
-    verify(managementController);
+    verify(managementController, clusters, cluster,
+        service0, service1, service2, service3, service4,
+        serviceResponse0, serviceResponse1, serviceResponse2, serviceResponse3, serviceResponse4,
+        ambariMetaInfo, stackId, serviceFactory);
   }
 
   @Test
   public void testUpdateResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-    Capture<Set<ServiceRequest>> requestsCapture = new Capture<Set<ServiceRequest>>();
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service0 = createNiceMock(Service.class);
+    ServiceFactory serviceFactory = createNiceMock(ServiceFactory.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
+    RequestStatusResponse requestStatusResponse = createNiceMock(RequestStatusResponse.class);
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
 
     // set expectations
-    expect(managementController.updateServices(capture(requestsCapture),
-      eq(mapRequestProps), eq(false), eq(true))).andReturn(response).once();
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getServiceFactory()).andReturn(serviceFactory).anyTimes();
+
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+
+    expect(cluster.getService("Service102")).andReturn(service0);
+
+    expect(service0.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
+    expect(service0.getServiceComponents()).andReturn(Collections.<String, ServiceComponent>emptyMap()).anyTimes();
+
+    Capture<Map<String, String>> requestPropertiesCapture = new Capture<Map<String, String>>();
+    Capture<Map<State, List<Service>>> changedServicesCapture = new Capture<Map<State, List<Service>>>();
+    Capture<Map<State, List<ServiceComponent>>> changedCompsCapture = new Capture<Map<State, List<ServiceComponent>>>();
+    Capture<Map<String, Map<State, List<ServiceComponentHost>>>> changedScHostsCapture = new Capture<Map<String, Map<State, List<ServiceComponentHost>>>>();
+    Capture<Map<String, String>> requestParametersCapture = new Capture<Map<String, String>>();
+    Capture<Collection<ServiceComponentHost>> ignoredScHostsCapture = new Capture<Collection<ServiceComponentHost>>();
+    Capture<Cluster> clusterCapture = new Capture<Cluster>();
+
+    expect(managementController.createStages(capture(clusterCapture), capture(requestPropertiesCapture), capture(requestParametersCapture), capture(changedServicesCapture), capture(changedCompsCapture), capture(changedScHostsCapture), capture(ignoredScHostsCapture), anyBoolean(), anyBoolean()
+    )).andReturn(requestStatusResponse);
 
     // replay
-    replay(managementController, response);
+    replay(managementController, clusters, cluster,
+        service0, serviceFactory, ambariMetaInfo, requestStatusResponse);
 
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
+    ResourceProvider provider = getServiceProvider(managementController);
 
     // add the property map to a set for the request.
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
 
-    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "DEPLOYED");
+    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
 
     // create the request
     Request request = PropertyHelper.getUpdateRequest(properties, mapRequestProps);
@@ -221,63 +298,67 @@ public class ServiceResourceProviderTest {
     provider.updateResources(request, predicate);
 
     // verify
-    verify(managementController, response);
-
-    Set<ServiceRequest> setRequests = requestsCapture.getValue();
-    assertEquals(1, setRequests.size());
-    ServiceRequest sr = setRequests.iterator().next();
-    assertEquals("Cluster100", sr.getClusterName());
-    assertEquals("Service102", sr.getServiceName());
-    assertEquals("DEPLOYED", sr.getDesiredState());
-    assertNull(sr.getConfigVersions());
+    verify(managementController, clusters, cluster,
+        service0, serviceFactory, ambariMetaInfo, requestStatusResponse);
   }
 
   @Test
   public void testReconfigureClientsFlag() throws Exception {
-    Resource.Type type = Resource.Type.Service;
-
     AmbariManagementController managementController1 = createMock(AmbariManagementController.class);
     AmbariManagementController managementController2 = createMock
-      (AmbariManagementController.class);
+        (AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Service service0 = createNiceMock(Service.class);
+    ServiceResponse serviceResponse0 = createNiceMock(ServiceResponse.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
 
     RequestStatusResponse response1 = createNiceMock(RequestStatusResponse.class);
     RequestStatusResponse response2 = createNiceMock(RequestStatusResponse
       .class);
-    Capture<Set<ServiceRequest>> requestsCapture = new Capture<Set<ServiceRequest>>();
 
     Map<String, String> mapRequestProps = new HashMap<String, String>();
     mapRequestProps.put("context", "Called from a test");
 
-    Set<ServiceResponse> nameResponse = new HashSet<ServiceResponse>();
-    nameResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-
     // set expectations
-    expect(managementController1.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController2.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(nameResponse).once();
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
 
-    // set expectations
-    expect(managementController1.updateServices(capture(requestsCapture),
-      eq(mapRequestProps), eq(false), eq(true))).andReturn(response1).once();
+    expect(managementController1.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController1.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+
+    expect(managementController2.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController2.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+
+    expect(cluster.getService("Service102")).andReturn(service0).anyTimes();
+
+    expect(service0.convertToResponse()).andReturn(serviceResponse0).anyTimes();
+    expect(service0.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
+    expect(service0.getServiceComponents()).andReturn(Collections.<String, ServiceComponent>emptyMap()).anyTimes();
 
-    expect(managementController2.updateServices(capture(requestsCapture),
-      eq(mapRequestProps), eq(false), eq(false))).andReturn(response2).once();
+    expect(serviceResponse0.getClusterName()).andReturn("Cluster100").anyTimes();
+    expect(serviceResponse0.getServiceName()).andReturn("Service102").anyTimes();
+
+    Capture<Map<String, String>> requestPropertiesCapture = new Capture<Map<String, String>>();
+    Capture<Map<State, List<Service>>> changedServicesCapture = new Capture<Map<State, List<Service>>>();
+    Capture<Map<State, List<ServiceComponent>>> changedCompsCapture = new Capture<Map<State, List<ServiceComponent>>>();
+    Capture<Map<String, Map<State, List<ServiceComponentHost>>>> changedScHostsCapture = new Capture<Map<String, Map<State, List<ServiceComponentHost>>>>();
+    Capture<Map<String, String>> requestParametersCapture = new Capture<Map<String, String>>();
+    Capture<Collection<ServiceComponentHost>> ignoredScHostsCapture = new Capture<Collection<ServiceComponentHost>>();
+    Capture<Cluster> clusterCapture = new Capture<Cluster>();
+
+    expect(managementController1.createStages(capture(clusterCapture), capture(requestPropertiesCapture), capture(requestParametersCapture), capture(changedServicesCapture), capture(changedCompsCapture), capture(changedScHostsCapture), capture(ignoredScHostsCapture), anyBoolean(), anyBoolean()
+    )).andReturn(response1);
+
+    expect(managementController2.createStages(capture(clusterCapture), capture(requestPropertiesCapture), capture(requestParametersCapture), capture(changedServicesCapture), capture(changedCompsCapture), capture(changedScHostsCapture), capture(ignoredScHostsCapture), anyBoolean(), anyBoolean()
+    )).andReturn(response2);
 
     // replay
-    replay(managementController1, response1);
-    replay(managementController2, response2);
-
-    ResourceProvider provider1 = AbstractControllerResourceProvider.getResourceProvider(
-      type,
-      PropertyHelper.getPropertyIds(type),
-      PropertyHelper.getKeyPropertyIds(type),
-      managementController1);
-
-    ResourceProvider provider2 = AbstractControllerResourceProvider
-      .getResourceProvider(
-      type,
-      PropertyHelper.getPropertyIds(type),
-      PropertyHelper.getKeyPropertyIds(type),
-      managementController2);
+    replay(managementController1, response1, managementController2, response2,
+        clusters, cluster, service0, serviceResponse0, ambariMetaInfo);
+
+    ResourceProvider provider1 = getServiceProvider(managementController1);
+
+    ResourceProvider provider2 = getServiceProvider(managementController2);
 
     // add the property map to a set for the request.
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
@@ -305,35 +386,33 @@ public class ServiceResourceProviderTest {
     provider2.updateResources(request, predicate2);
 
     // verify
-    verify(managementController1, response1);
-    verify(managementController2, response2);
+    verify(managementController1, response1, managementController2, response2,
+        clusters, cluster, service0, serviceResponse0, ambariMetaInfo);
   }
 
   @Test
   public void testDeleteResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
 
     // set expectations
-    expect(managementController.deleteServices(AbstractResourceProviderTest.Matcher.getServiceRequestSet(null, "Service100", null, null))).andReturn(response);
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+    cluster.deleteService("Service100");
 
     // replay
-    replay(managementController, response);
+    replay(managementController, clusters, cluster);
 
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
+    ResourceProvider provider = getServiceProvider(managementController);
 
     AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
 
     ((ObservableResourceProvider)provider).addObserver(observer);
 
     // delete the service named Service100
-    Predicate  predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service100").toPredicate();
+    Predicate  predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").and()
+        .property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service100").toPredicate();
     provider.deleteResources(predicate);
 
 
@@ -345,7 +424,7 @@ public class ServiceResourceProviderTest {
     Assert.assertNull(lastEvent.getRequest());
 
     // verify
-    verify(managementController, response);
+    verify(managementController, clusters, cluster);
   }
 
   @Test
@@ -393,4 +472,41 @@ public class ServiceResourceProviderTest {
     unsupported = provider.checkPropertyIds(Collections.singleton("config/unknown_property"));
     Assert.assertTrue(unsupported.isEmpty());
   }
+
+  public static ServiceResourceProvider getServiceProvider(AmbariManagementController managementController) {
+    Resource.Type type = Resource.Type.Service;
+
+    return (ServiceResourceProvider) AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController);
+  }
+
+  public static void createServices(AmbariManagementController controller, Set<ServiceRequest> requests) throws AmbariException {
+    ServiceResourceProvider provider = getServiceProvider(controller);
+    provider.createServices(requests);
+  }
+
+  public static Set<ServiceResponse> getServices(AmbariManagementController controller,
+                                                 Set<ServiceRequest> requests) throws AmbariException {
+    ServiceResourceProvider provider = getServiceProvider(controller);
+    return provider.getServices(requests);
+  }
+
+  public static RequestStatusResponse updateServices(AmbariManagementController controller,
+                                                     Set<ServiceRequest> requests,
+                                                     Map<String, String> requestProperties, boolean runSmokeTest,
+                                                     boolean reconfigureClients) throws AmbariException
+  {
+    ServiceResourceProvider provider = getServiceProvider(controller);
+    return provider.updateServices(requests, requestProperties, runSmokeTest, reconfigureClients);
+  }
+
+  public static RequestStatusResponse deleteServices(AmbariManagementController controller, Set<ServiceRequest> requests)
+      throws AmbariException {
+    ServiceResourceProvider provider = getServiceProvider(controller);
+    return provider.deleteServices(requests);
+  }
+
 }

Some files were not shown because too many files changed in this diff