
AMBARI-4034. Create the RequestSchedule resource provider. Patch 1. (swagle)

Siddharth Wagle 11 years ago
parent commit 5dcea3726e
55 changed files with 3871 additions and 333 deletions
  1. +7 -0     ambari-server/conf/unix/ambari.properties
  2. +10 -0    ambari-server/pom.xml
  3. +78 -0    ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
  4. +4 -0     ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
  5. +9 -0     ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterService.java
  6. +162 -0   ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestScheduleService.java
  7. +6 -1     ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/JsonRequestBodyParser.java
  8. +6 -0     ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
  9. +40 -4    ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
  10. +8 -1    ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
  11. +8 -0    ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  12. +9 -5    ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
  13. +8 -0    ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
  14. +89 -0   ambari-server/src/main/java/org/apache/ambari/server/controller/RequestScheduleRequest.java
  15. +132 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/RequestScheduleResponse.java
  16. +2 -0    ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
  17. +3 -2    ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
  18. +584 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestScheduleResourceProvider.java
  19. +1 -0    ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java
  20. +34 -23  ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java
  21. +1 -1    ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java
  22. +69 -30  ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchRequestEntity.java
  23. +3 -17   ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchRequestEntityPK.java
  24. +19 -83  ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java
  25. +103 -0  ambari-server/src/main/java/org/apache/ambari/server/scheduler/AbstractLinearExecutionJob.java
  26. +34 -0   ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java
  27. +103 -0  ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduleManager.java
  28. +52 -0   ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java
  29. +170 -0  ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
  30. +21 -0   ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
  31. +223 -116 ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
  32. +0 -5    ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
  33. +46 -0   ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/Batch.java
  34. +109 -0  ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchRequest.java
  35. +34 -0   ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchRequestJob.java
  36. +47 -0   ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchSettings.java
  37. +145 -0  ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java
  38. +31 -0   ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecutionFactory.java
  39. +373 -0  ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecutionImpl.java
  40. +177 -0  ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/Schedule.java
  41. +63 -0   ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java
  42. +3 -4    ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
  43. +3 -6    ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
  44. +4 -5    ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
  45. +3 -4    ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql
  46. +1 -1    ambari-server/src/main/resources/META-INF/persistence.xml
  47. +4 -0    ambari-server/src/main/resources/key_properties.json
  48. +9 -0    ambari-server/src/main/resources/properties.json
  49. +5 -0    ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
  50. +404 -0  ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestScheduleResourceProviderTest.java
  51. +0 -2    ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
  52. +26 -23  ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java
  53. +110 -0  ambari-server/src/test/java/org/apache/ambari/server/scheduler/ExecutionSchedulerTest.java
  54. +275 -0  ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java
  55. +1 -0    ambari-server/src/test/resources/password.dat

+ 7 - 0
ambari-server/conf/unix/ambari.properties

@@ -29,3 +29,10 @@ bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setu
 api.authenticate=true
 server.connection.max.idle.millis=900000
 server.fqdn.service.url=http://169.254.169.254/latest/meta-data/public-hostname
+
+# Scheduler settings
+server.execution.scheduler.isClustered=false
+server.execution.scheduler.maxThreads=5
+server.execution.scheduler.maxDbConnections=5
+server.execution.scheduler.misfire.toleration.minutes=480
+

+ 10 - 0
ambari-server/pom.xml

@@ -832,6 +832,16 @@
       <artifactId>jsr305</artifactId>
       <version>1.3.9</version>
     </dependency>
+    <dependency>
+      <groupId>org.quartz-scheduler</groupId>
+      <artifactId>quartz</artifactId>
+      <version>2.2.1</version>
+    </dependency>
+    <dependency>
+      <groupId>org.quartz-scheduler</groupId>
+      <artifactId>quartz-jobs</artifactId>
+      <version>2.2.1</version>
+    </dependency>
   </dependencies>
   <!--<reporting>
         <plugins>
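The two new artifacts pull in the Quartz job scheduler that the execution-scheduler classes in this patch (ExecutionScheduler, ExecutionSchedulerImpl, AbstractLinearExecutionJob) are built on. Those wrapper classes are listed above but their bodies are not shown in this excerpt, so here is a minimal standalone sketch of the Quartz 2.2.1 API itself; it is illustrative only and not code from the patch:

  import org.quartz.CronScheduleBuilder;
  import org.quartz.Job;
  import org.quartz.JobBuilder;
  import org.quartz.JobDetail;
  import org.quartz.JobExecutionContext;
  import org.quartz.Scheduler;
  import org.quartz.SchedulerException;
  import org.quartz.Trigger;
  import org.quartz.TriggerBuilder;
  import org.quartz.impl.StdSchedulerFactory;

  // Illustrative only: schedule a cron-triggered job the way Quartz 2.x expects.
  public class QuartzSketch implements Job {
    @Override
    public void execute(JobExecutionContext context) {
      System.out.println("batch step fired at " + context.getFireTime());
    }

    public static void main(String[] args) throws SchedulerException {
      Scheduler scheduler = new StdSchedulerFactory().getScheduler();
      scheduler.start();

      JobDetail job = JobBuilder.newJob(QuartzSketch.class)
          .withIdentity("batchJob", "requestSchedules").build();
      // Fire at 02:30 every day; note Quartz cron expressions have a seconds field.
      Trigger trigger = TriggerBuilder.newTrigger()
          .withSchedule(CronScheduleBuilder.cronSchedule("0 30 2 * * ?"))
          .build();

      scheduler.scheduleJob(job, trigger);
    }
  }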

+ 78 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java

@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.api.resources;
+
+import org.apache.ambari.server.api.services.Request;
+import org.apache.ambari.server.api.util.TreeNode;
+import org.apache.ambari.server.controller.spi.Resource;
+import java.util.Collections;
+import java.util.List;
+
+public class RequestScheduleResourceDefinition extends BaseResourceDefinition {
+  /**
+   * Constructor.
+   */
+  public RequestScheduleResourceDefinition() {
+    super(Resource.Type.RequestSchedule);
+  }
+
+  @Override
+  public String getPluralName() {
+    return "request_schedules";
+  }
+
+  @Override
+  public String getSingularName() {
+    return "request_schedule";
+  }
+
+  @Override
+  public List<PostProcessor> getPostProcessors() {
+    return Collections.<PostProcessor>singletonList(
+      new RequestScheduleHrefPostProcessor());
+  }
+
+  private class RequestScheduleHrefPostProcessor implements PostProcessor {
+
+    @Override
+    public void process(Request request, TreeNode<Resource> resultNode, String href) {
+      StringBuilder sb = new StringBuilder();
+      String[] tokens = href.split("/");
+
+      for (int i = 0; i < tokens.length; ++i) {
+        String s = tokens[i];
+        sb.append(s).append('/');
+        if ("clusters".equals(s)) {
+          sb.append(tokens[i + 1]).append('/');
+          break;
+        }
+      }
+
+      Object scheduleId = resultNode.getObject()
+        .getPropertyValue(getClusterController()
+          .getSchema(Resource.Type.RequestSchedule)
+            .getKeyPropertyId(Resource.Type.RequestSchedule));
+
+      sb.append("request_schedules/").append(scheduleId);
+
+      resultNode.setProperty("href", sb.toString());
+    }
+  }
+}
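A worked example with hypothetical values: given a result node whose schedule id is 25 and an incoming href of

  http://localhost:8080/api/v1/clusters/c1/some/other/path

the loop copies tokens up to and including the segment after "clusters" (discarding anything deeper), then appends the schedule sub-path, producing

  http://localhost:8080/api/v1/clusters/c1/request_schedules/25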

+ 4 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java

@@ -163,6 +163,10 @@ public class ResourceInstanceFactoryImpl implements ResourceInstanceFactory {
         resourceDefinition = new ConfigGroupResourceDefinition();
         break;
 
+      case RequestSchedule:
+        resourceDefinition = new RequestScheduleResourceDefinition();
+        break;
+
       default:
         throw new IllegalArgumentException("Unsupported resource type: " + type);
     }

+ 9 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterService.java

@@ -199,6 +199,15 @@ public class ClusterService extends BaseService {
     return new ConfigGroupService(clusterName);
   }
 
+  /**
+   * Gets the request schedule service
+   */
+  @Path("{clusterName}/request_schedules")
+  public RequestScheduleService getRequestScheduleService(
+      @PathParam("clusterName") String clusterName) {
+    return new RequestScheduleService(clusterName);
+  }
+
   /**
    * Create a cluster resource instance.
    *

+ 162 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestScheduleService.java

@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.api.services;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Service responsible for management of a batch of requests with attached
+ * schedule
+ */
+public class RequestScheduleService extends BaseService {
+  /**
+   * Parent cluster name.
+   */
+  private String m_clusterName;
+
+  /**
+   * Constructor.
+   *
+   * @param clusterName parent cluster name
+   */
+  public RequestScheduleService(String clusterName) {
+    this.m_clusterName = clusterName;
+  }
+
+  /**
+   * Handles URL: /clusters/{clusterName}/request_schedules
+   * Get all the scheduled requests for a cluster.
+   *
+   * @param headers
+   * @param ui
+   * @return
+   */
+  @GET
+  @Produces("text/plain")
+  public Response getRequestSchedules(@Context HttpHeaders headers,
+                                  @Context UriInfo ui) {
+    return handleRequest(headers, null, ui, Request.Type.GET,
+      createRequestSchedule(m_clusterName, null));
+  }
+
+  /**
+   * Handles URL: /clusters/{clusterName}/request_schedules/{requestScheduleId}
+   * Get details on a specific request schedule
+   *
+   * @return
+   */
+  @GET
+  @Path("{requestScheduleId}")
+  @Produces("text/plain")
+  public Response getRequestSchedule(@Context HttpHeaders headers,
+                                     @Context UriInfo ui,
+                                     @PathParam("requestScheduleId") String requestScheduleId) {
+    return handleRequest(headers, null, ui, Request.Type.GET,
+      createRequestSchedule(m_clusterName, requestScheduleId));
+  }
+
+  /**
+   * Handles POST /clusters/{clusterName}/request_schedules
+   * Create a new request schedule
+   *
+   * @param body
+   * @param headers
+   * @param ui
+   * @return
+   */
+  @POST
+  @Produces("text/plain")
+  public Response createRequestSchedule(String body,
+                                        @Context HttpHeaders headers,
+                                        @Context UriInfo ui) {
+    return handleRequest(headers, body, ui, Request.Type.POST,
+      createRequestSchedule(m_clusterName, null));
+  }
+
+  /**
+   * Handles PUT /clusters/{clusterName}/request_schedules/{requestScheduleId}
+   * Update a request schedule
+   *
+   * @param body
+   * @param headers
+   * @param ui
+   * @param requestScheduleId
+   * @return
+   */
+  @PUT
+  @Path("{requestScheduleId}")
+  @Produces("text/plain")
+  public Response updateRequestSchedule(String body,
+                                        @Context HttpHeaders headers,
+                                        @Context UriInfo ui,
+                                        @PathParam("requestScheduleId") String requestScheduleId) {
+    return handleRequest(headers, body, ui, Request.Type.PUT,
+      createRequestSchedule(m_clusterName, requestScheduleId));
+  }
+
+  /**
+   * Handles DELETE /clusters/{clusterName}/request_schedules/{requestScheduleId}
+   * Delete a request schedule
+   *
+   * @param headers
+   * @param ui
+   * @param requestScheduleId
+   * @return
+   */
+  @DELETE
+  @Path("{requestScheduleId}")
+  @Produces("text/plain")
+  public Response deleteRequestSchedule(@Context HttpHeaders headers,
+                                    @Context UriInfo ui,
+                                    @PathParam("requestScheduleId") String requestScheduleId) {
+    return handleRequest(headers, null, ui, Request.Type.DELETE,
+      createRequestSchedule(m_clusterName, requestScheduleId));
+  }
+
+
+  /**
+   * Create a request schedule resource instance
+   * @param clusterName
+   * @param requestScheduleId
+   * @return
+   */
+  private ResourceInstance createRequestSchedule(String clusterName,
+                                                 String requestScheduleId) {
+    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
+    mapIds.put(Resource.Type.Cluster, clusterName);
+    mapIds.put(Resource.Type.RequestSchedule, requestScheduleId);
+
+    return createResource(Resource.Type.RequestSchedule, mapIds);
+  }
+
+}
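For reference, the JAX-RS annotations above wire up five endpoints, all relative to the {clusterName}/request_schedules sub-path registered in ClusterService:

  GET    /clusters/{clusterName}/request_schedules                       list request schedules
  GET    /clusters/{clusterName}/request_schedules/{requestScheduleId}   get one request schedule
  POST   /clusters/{clusterName}/request_schedules                       create request schedules
  PUT    /clusters/{clusterName}/request_schedules/{requestScheduleId}   update a request schedule
  DELETE /clusters/{clusterName}/request_schedules/{requestScheduleId}   delete a request schedule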

+ 6 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/JsonRequestBodyParser.java

@@ -125,7 +125,12 @@ public class JsonRequestBodyParser implements RequestBodyParser {
         if (name.equals(BODY_TITLE)) {
           name = "";
         }
-        processNode(child, path.isEmpty() ? name : path + '/' + name, propertySet, requestInfoProps);
+        if (name.equals(REQUEST_BLOB_TITLE)) {
+          propertySet.getProperties().put(PropertyHelper.getPropertyId(path,
+            name), child.toString());
+        } else {
+          processNode(child, path.isEmpty() ? name : path + '/' + name, propertySet, requestInfoProps);
+        }
       } else {
         // field
         if (path.startsWith(REQUEST_INFO_PATH)) {
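The effect of the new branch: a subtree titled "RequestBodyInfo" is captured verbatim as a JSON string rather than being flattened into individual properties. With a hypothetical request fragment such as

  "RequestBodyInfo" : { "RequestInfo" : { "command" : "RESTART" } }

the parser stores a single property whose value is (roughly) the raw string {"RequestInfo":{"command":"RESTART"}}; RequestScheduleResourceProvider later hands that string to BatchRequest.setBody() unchanged.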

+ 6 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java

@@ -31,6 +31,12 @@ public interface RequestBodyParser {
    * RequestInfo category path.
    */
   public static final String REQUEST_INFO_PATH = "RequestInfo";
+
+  /**
+   * Category title whose child node is kept as a raw JSON string (a blob)
+   * instead of being parsed into individual properties.
+   */
+  public static final String REQUEST_BLOB_TITLE = "RequestBodyInfo";
+
   /**
    * Name of the query property which may exist under REQUEST_INFO_PATH.
    */

+ 40 - 4
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -262,6 +262,18 @@ public class Configuration {
    */
   private static final String REPO_SUFFIX_DEFAULT = "/repodata/repomd.xml";
 
+  public static final String EXECUTION_SCHEDULER_CLUSTERED =
+    "server.execution.scheduler.isClustered";
+  public static final String EXECUTION_SCHEDULER_THREADS =
+    "server.execution.scheduler.maxThreads";
+  public static final String EXECUTION_SCHEDULER_CONNECTIONS =
+    "server.execution.scheduler.maxDbConnections";
+  public static final String EXECUTION_SCHEDULER_MISFIRE_TOLERATION =
+    "server.execution.scheduler.misfire.toleration.minutes";
+  public static final String DEFAULT_SCHEDULER_THREAD_COUNT = "5";
+  public static final String DEFAULT_SCHEDULER_MAX_CONNECTIONS = "5";
+  public static final String DEFAULT_EXECUTION_SCHEDULER_MISFIRE_TOLERATION = "480";
+
   private static final Logger LOG = LoggerFactory.getLogger(
       Configuration.class);
 
@@ -590,11 +602,11 @@ public class Configuration {
   }
 
   public String getDatabaseDriver() {
-    return properties.getProperty(SERVER_JDBC_DRIVER_KEY);
+    return properties.getProperty(SERVER_JDBC_DRIVER_KEY, JDBC_LOCAL_DRIVER);
   }
 
   public String getDatabaseUrl() {
-    return properties.getProperty(SERVER_JDBC_URL_KEY);
+    return properties.getProperty(SERVER_JDBC_URL_KEY, getLocalDatabaseUrl());
   }
 
   public String getLocalDatabaseUrl() {
@@ -602,7 +614,7 @@ public class Configuration {
     if(dbName == null || dbName.isEmpty())
       throw new RuntimeException("Server DB Name is not configured!");
 
-    return JDBC_LOCAL_URL + properties.getProperty(SERVER_DB_NAME_KEY);
+    return JDBC_LOCAL_URL + dbName;
   }
 
   public String getDatabaseUser() {
@@ -611,7 +623,10 @@ public class Configuration {
 
   public String getDatabasePassword() {
     String passwdProp = properties.getProperty(SERVER_JDBC_USER_PASSWD_KEY);
-    String dbpasswd = readPasswordFromStore(passwdProp);
+    String dbpasswd = null;
+    if (CredentialProvider.isAliasString(passwdProp)) {
+      dbpasswd = readPasswordFromStore(passwdProp);
+    }
 
     if (dbpasswd != null)
       return dbpasswd;
@@ -850,4 +865,25 @@ public class Configuration {
     
     return value.split(",");
   }
+
+  public String isExecutionSchedulerClustered() {
+    return properties.getProperty(EXECUTION_SCHEDULER_CLUSTERED, "false");
+  }
+
+  public String getExecutionSchedulerThreads() {
+    return properties.getProperty(EXECUTION_SCHEDULER_THREADS,
+      DEFAULT_SCHEDULER_THREAD_COUNT);
+  }
+
+  public String getExecutionSchedulerConnections() {
+    return properties.getProperty(EXECUTION_SCHEDULER_CONNECTIONS,
+      DEFAULT_SCHEDULER_MAX_CONNECTIONS);
+  }
+
+  public Long getExecutionSchedulerMisfireToleration() {
+    String limit = properties.getProperty(
+      EXECUTION_SCHEDULER_MISFIRE_TOLERATION,
+      DEFAULT_EXECUTION_SCHEDULER_MISFIRE_TOLERATION);
+    return Long.parseLong(limit);
+  }
 }
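ExecutionSchedulerImpl (part of this patch, but not shown in this excerpt) presumably feeds these getters into Quartz. A hypothetical sketch of that wiring, using standard Quartz 2.x property keys; the class and method shape here is an assumption, not the patch's actual code:

  import java.util.Properties;
  import org.apache.ambari.server.configuration.Configuration;
  import org.quartz.Scheduler;
  import org.quartz.SchedulerException;
  import org.quartz.impl.StdSchedulerFactory;

  public class SchedulerWiringSketch {
    // Hypothetical: map the Configuration getters above onto Quartz property keys.
    static Scheduler buildScheduler(Configuration configuration) throws SchedulerException {
      Properties properties = new Properties();
      properties.setProperty("org.quartz.scheduler.instanceName", "ExecutionScheduler");
      properties.setProperty("org.quartz.threadPool.threadCount",
          configuration.getExecutionSchedulerThreads());      // defaults to "5"
      properties.setProperty("org.quartz.jobStore.isClustered",
          configuration.isExecutionSchedulerClustered());     // defaults to "false"
      properties.setProperty("org.quartz.jobStore.maxConnections",
          configuration.getExecutionSchedulerConnections());  // defaults to "5"
      return new StdSchedulerFactory(properties).getScheduler();
    }
  }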

+ 8 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java

@@ -30,6 +30,7 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 
 import java.util.Collection;
 import java.util.List;
@@ -482,7 +483,7 @@ public interface AmbariManagementController {
    */
   public Map<String, Map<String,String>> findConfigurationTagsWithOverrides(
           Cluster cluster, String hostName) throws AmbariException;
-  
+
   /**
    * Returns parameters for RCA database
    *
@@ -490,5 +491,11 @@ public interface AmbariManagementController {
    *
    */
   public Map<String, String> getRcaParameters();
+
+  /**
+   * Get the factory used to create request schedules.
+   *
+   * @return the request execution factory
+   */
+  public RequestExecutionFactory getRequestExecutionFactory();
 }
   

+ 8 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -61,6 +61,7 @@ import org.apache.ambari.server.stageplanner.RoleGraph;
 import org.apache.ambari.server.state.*;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostMaintenanceEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostRestoreEvent;
@@ -130,6 +131,8 @@ public class AmbariManagementControllerImpl implements
   private ConfigGroupFactory configGroupFactory;
   @Inject
   private ConfigHelper configHelper;
+  @Inject
+  private RequestExecutionFactory requestExecutionFactory;
 
   final private String masterHostname;
   final private Integer masterPort;
@@ -1006,6 +1009,11 @@ public class AmbariManagementControllerImpl implements
     return configHelper.getEffectiveDesiredTags(cluster, hostName);
   }
 
+  @Override
+  public RequestExecutionFactory getRequestExecutionFactory() {
+    return requestExecutionFactory;
+  }
+
   private List<Stage> doStageCreation(Cluster cluster,
       Map<State, List<Service>> changedServices,
       Map<State, List<ServiceComponent>> changedComps,

+ 9 - 5
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -47,6 +47,8 @@ import org.apache.ambari.server.orm.PersistenceType;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.resources.ResourceManager;
 import org.apache.ambari.server.resources.api.rest.GetResource;
+import org.apache.ambari.server.scheduler.ExecutionScheduleManager;
+import org.apache.ambari.server.scheduler.ExecutionScheduler;
 import org.apache.ambari.server.security.CertificateManager;
 import org.apache.ambari.server.security.SecurityFilter;
 import org.apache.ambari.server.security.authorization.AmbariLdapAuthenticationProvider;
@@ -111,7 +113,6 @@ public class AmbariServer {
     return configs.getServerOsType();
   }
 
-
   private static AmbariManagementController clusterController = null;
 
   public static AmbariManagementController getController() {
@@ -349,6 +350,11 @@ public class AmbariServer {
       AmbariManagementController controller = injector.getInstance(
           AmbariManagementController.class);
 
+      LOG.info("********* Initializing Scheduled Request Manager **********");
+      ExecutionScheduleManager executionScheduleManager = injector
+        .getInstance(ExecutionScheduleManager.class);
+
       clusterController = controller;
 
       // FIXME need to figure out correct order of starting things to
@@ -365,10 +371,8 @@ public class AmbariServer {
       manager.start();
       LOG.info("********* Started ActionManager **********");
 
-      //TODO: Remove this code when APIs are ready for testing.
-      //      RequestInjectorForTest testInjector = new RequestInjectorForTest(controller, clusters);
-      //      Thread testInjectorThread = new Thread(testInjector);
-      //      testInjectorThread.start();
+      executionScheduleManager.start();
+      LOG.info("********* Started Scheduled Request Manager **********");
 
       server.join();
       LOG.info("Joined the Server");

+ 8 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java

@@ -32,6 +32,8 @@ import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.PersistenceType;
+import org.apache.ambari.server.scheduler.ExecutionScheduler;
+import org.apache.ambari.server.scheduler.ExecutionSchedulerImpl;
 import org.apache.ambari.server.serveraction.ServerActionManager;
 import org.apache.ambari.server.serveraction.ServerActionManagerImpl;
 import org.apache.ambari.server.state.Cluster;
@@ -56,6 +58,9 @@ import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.configgroup.ConfigGroupImpl;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.ambari.server.state.host.HostImpl;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
 import org.springframework.security.crypto.password.PasswordEncoder;
 import org.springframework.security.crypto.password.StandardPasswordEncoder;
@@ -112,6 +117,7 @@ public class ControllerModule extends AbstractModule {
         .to(AmbariManagementControllerImpl.class);
     bind(AbstractRootServiceResponseFactory.class).to(RootServiceResponseFactory.class);
     bind(ServerActionManager.class).to(ServerActionManagerImpl.class);
+    bind(ExecutionScheduler.class).to(ExecutionSchedulerImpl.class);
 
     requestStaticInjection(ExecutionCommandWrapper.class);
   }
@@ -189,6 +195,8 @@ public class ControllerModule extends AbstractModule {
         Config.class, ConfigImpl.class).build(ConfigFactory.class));
     install(new FactoryModuleBuilder().implement(
       ConfigGroup.class, ConfigGroupImpl.class).build(ConfigGroupFactory.class));
+    install(new FactoryModuleBuilder().implement(RequestExecution.class,
+      RequestExecutionImpl.class).build(RequestExecutionFactory.class));
     install(new FactoryModuleBuilder().build(StageFactory.class));
     bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
   }
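The FactoryModuleBuilder binding lets Guice generate the RequestExecutionFactory implementation via assisted injection. The factory interface itself is part of this patch but its body is not shown here; judging from the call site in RequestScheduleResourceProvider below (requestExecutionFactory.createNew(cluster, batch, schedule)), it presumably looks like:

  package org.apache.ambari.server.state.scheduler;

  import org.apache.ambari.server.state.Cluster;

  // Presumed shape; the actual file is in this patch but not shown in this excerpt.
  public interface RequestExecutionFactory {
    RequestExecution createNew(Cluster cluster, Batch batch, Schedule schedule);
  }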

+ 89 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/RequestScheduleRequest.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller;
+
+import org.apache.ambari.server.state.scheduler.Batch;
+import org.apache.ambari.server.state.scheduler.Schedule;
+
+public class RequestScheduleRequest {
+  private Long id;
+  private String clusterName;
+  private String description;
+  private String status;
+  private Batch batch;
+  private Schedule schedule;
+
+  public RequestScheduleRequest(Long id, String clusterName,
+                                String description, String status,
+                                Batch batch, Schedule schedule) {
+    this.id = id;
+    this.clusterName = clusterName;
+    this.description = description;
+    this.status = status;
+    this.batch = batch;
+    this.schedule = schedule;
+  }
+
+  public Long getId() {
+    return id;
+  }
+
+  public void setId(Long id) {
+    this.id = id;
+  }
+
+  public String getClusterName() {
+    return clusterName;
+  }
+
+  public void setClusterName(String clusterName) {
+    this.clusterName = clusterName;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public String getStatus() {
+    return status;
+  }
+
+  public void setStatus(String status) {
+    this.status = status;
+  }
+
+  public Batch getBatch() {
+    return batch;
+  }
+
+  public void setBatch(Batch batch) {
+    this.batch = batch;
+  }
+
+  public Schedule getSchedule() {
+    return schedule;
+  }
+
+  public void setSchedule(Schedule schedule) {
+    this.schedule = schedule;
+  }
+}

+ 132 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/RequestScheduleResponse.java

@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import org.apache.ambari.server.state.scheduler.Batch;
+import org.apache.ambari.server.state.scheduler.Schedule;
+
+public class RequestScheduleResponse {
+  private Long id;
+  private String clusterName;
+  private String description;
+  private String status;
+  private Batch batch;
+  private Schedule schedule;
+  private String createUser;
+  private String createTime;
+  private String updateUser;
+  private String updateTime;
+
+  public RequestScheduleResponse(Long id, String clusterName,
+                                 String description, String status,
+                                 Batch batch, Schedule schedule,
+                                 String createUser, String createTime,
+                                 String updateUser, String updateTime) {
+    this.id = id;
+    this.clusterName = clusterName;
+    this.description = description;
+    this.status = status;
+    this.batch = batch;
+    this.schedule = schedule;
+    this.createUser = createUser;
+    this.createTime = createTime;
+    this.updateUser = updateUser;
+    this.updateTime = updateTime;
+  }
+
+  public Long getId() {
+    return id;
+  }
+
+  public void setId(Long id) {
+    this.id = id;
+  }
+
+  public String getClusterName() {
+    return clusterName;
+  }
+
+  public void setClusterName(String clusterName) {
+    this.clusterName = clusterName;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public String getStatus() {
+    return status;
+  }
+
+  public void setStatus(String status) {
+    this.status = status;
+  }
+
+  public Batch getBatch() {
+    return batch;
+  }
+
+  public void setBatch(Batch batch) {
+    this.batch = batch;
+  }
+
+  public Schedule getSchedule() {
+    return schedule;
+  }
+
+  public void setSchedule(Schedule schedule) {
+    this.schedule = schedule;
+  }
+
+  public String getCreateUser() {
+    return createUser;
+  }
+
+  public void setCreateUser(String createUser) {
+    this.createUser = createUser;
+  }
+
+  public String getCreateTime() {
+    return createTime;
+  }
+
+  public void setCreateTime(String createTime) {
+    this.createTime = createTime;
+  }
+
+  public String getUpdateUser() {
+    return updateUser;
+  }
+
+  public void setUpdateUser(String updateUser) {
+    this.updateUser = updateUser;
+  }
+
+  public String getUpdateTime() {
+    return updateTime;
+  }
+
+  public void setUpdateTime(String updateTime) {
+    this.updateTime = updateTime;
+  }
+}

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java

@@ -126,6 +126,8 @@ public abstract class AbstractControllerResourceProvider extends AbstractResourc
         return new RootServiceHostComponentResourceProvider(propertyIds, keyPropertyIds, managementController);
       case ConfigGroup:
         return new ConfigGroupResourceProvider(propertyIds, keyPropertyIds, managementController);
+      case RequestSchedule:
+        return new RequestScheduleResourceProvider(propertyIds, keyPropertyIds, managementController);
       default:
         throw new IllegalArgumentException("Unknown type " + type);
     }

+ 3 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java

@@ -343,7 +343,8 @@ public class ConfigGroupResourceProvider extends
       cluster = clusters.getCluster(request.getClusterName());
     } catch (ClusterNotFoundException e) {
       throw new ParentObjectNotFoundException(
-        "Attempted to add a service to a cluster which doesn't exist", e);
+        "Attempted to delete a config group from a cluster which doesn't " +
+          "exist", e);
     }
 
     configLogger.info("Deleting Config group, "
@@ -469,7 +470,7 @@ public class ConfigGroupResourceProvider extends
         cluster = clusters.getCluster(request.getClusterName());
       } catch (ClusterNotFoundException e) {
         throw new ParentObjectNotFoundException(
-          "Attempted to add a service to a cluster which doesn't exist", e);
+          "Attempted to add a config group to a cluster which doesn't exist", e);
       }
 
       if (request.getId() == null) {

+ 584 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestScheduleResourceProvider.java

@@ -0,0 +1,584 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ClusterNotFoundException;
+import org.apache.ambari.server.ParentObjectNotFoundException;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.RequestScheduleRequest;
+import org.apache.ambari.server.controller.RequestScheduleResponse;
+import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
+import org.apache.ambari.server.controller.spi.NoSuchResourceException;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.scheduler.Batch;
+import org.apache.ambari.server.state.scheduler.BatchRequest;
+import org.apache.ambari.server.state.scheduler.BatchSettings;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.scheduler.Schedule;
+import org.apache.ambari.server.utils.DateUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class RequestScheduleResourceProvider extends AbstractControllerResourceProvider {
+  private static final Logger LOG =
+    LoggerFactory.getLogger(RequestScheduleResourceProvider.class);
+
+  protected static final String REQUEST_SCHEDULE_ID_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "id");
+  protected static final String REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "cluster_name");
+  protected static final String REQUEST_SCHEDULE_DESC_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "description");
+  protected static final String REQUEST_SCHEDULE_STATUS_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "status");
+  protected static final String REQUEST_SCHEDULE_BATCH_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "batch");
+  protected static final String REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "schedule");
+  protected static final String REQUEST_SCHEDULE_CREATE_USER_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "create_user");
+  protected static final String REQUEST_SCHEDULE_UPDATE_USER_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "update_user");
+  protected static final String REQUEST_SCHEDULE_CREATE_TIME_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "create_time");
+  protected static final String REQUEST_SCHEDULE_UPDATE_TIME_PROPERTY_ID =
+    PropertyHelper.getPropertyId("RequestSchedule", "update_time");
+
+  protected static final String REQUEST_SCHEDULE_BATCH_SEPARATION_PROPERTY_ID =
+    PropertyHelper.getPropertyId("batch_settings", "batch_separation_in_minutes");
+  protected static final String REQUEST_SCHEDULE_BATCH_TOLERATION_PROPERTY_ID =
+    PropertyHelper.getPropertyId("batch_settings", "task_failure_tolerance");
+  protected static final String REQUEST_SCHEDULE_BATCH_REQUESTS_PROPERTY_ID =
+    PropertyHelper.getPropertyId(null, "requests");
+
+  protected static final String BATCH_REQUEST_TYPE_PROPERTY_ID =
+    PropertyHelper.getPropertyId(null, "type");
+  protected static final String BATCH_REQUEST_URI_PROPERTY_ID =
+    PropertyHelper.getPropertyId(null, "uri");
+  protected static final String BATCH_REQUEST_ORDER_ID_PROPERTY_ID =
+    PropertyHelper.getPropertyId(null, "order_id");
+  protected static final String BATCH_REQUEST_BODY_PROPERTY_ID =
+    PropertyHelper.getPropertyId(null, RequestBodyParser.REQUEST_BLOB_TITLE);
+
+  protected static final String SCHEDULE_DAYS_OF_MONTH_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "days_of_month");
+  protected static final String SCHEDULE_MINUTES_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "minutes");
+  protected static final String SCHEDULE_HOURS_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "hours");
+  protected static final String SCHEDULE_YEAR_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "year");
+  protected static final String SCHEDULE_DAY_OF_WEEK_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "day_of_week");
+  protected static final String SCHEDULE_MONTH_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "month");
+  protected static final String SCHEDULE_START_TIME_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "startTime");
+  protected static final String SCHEDULE_END_TIME_PROPERTY_ID =
+    PropertyHelper.getPropertyId(REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID, "endTime");
+
+  private static Set<String> pkPropertyIds = new HashSet<String>(
+    Arrays.asList(REQUEST_SCHEDULE_ID_PROPERTY_ID));
+
+  /**
+   * Create a new resource provider for the given management controller.
+   *
+   * @param propertyIds          the property ids
+   * @param keyPropertyIds       the key property ids
+   * @param managementController the management controller
+   */
+  protected RequestScheduleResourceProvider(Set<String> propertyIds,
+      Map<Resource.Type, String> keyPropertyIds,
+        AmbariManagementController managementController) {
+    super(propertyIds, keyPropertyIds, managementController);
+  }
+
+  @Override
+  protected Set<String> getPKPropertyIds() {
+    return pkPropertyIds;
+  }
+
+  @Override
+  public RequestStatus createResources(Request request) throws SystemException,
+      UnsupportedPropertyException, ResourceAlreadyExistsException,
+      NoSuchParentResourceException {
+
+    final Set<RequestScheduleRequest> requests = new HashSet<RequestScheduleRequest>();
+
+    for (Map<String, Object> propertyMap : request.getProperties()) {
+      requests.add(getRequestScheduleRequest(propertyMap));
+    }
+
+    Set<RequestScheduleResponse> responses =
+      createResources(new Command<Set<RequestScheduleResponse>>() {
+      @Override
+      public Set<RequestScheduleResponse> invoke() throws AmbariException {
+        return createRequestSchedules(requests);
+      }
+    });
+
+    notifyCreate(Resource.Type.RequestSchedule, request);
+
+    Set<Resource> associatedResources = new HashSet<Resource>();
+    for (RequestScheduleResponse response : responses) {
+      Resource resource = new ResourceImpl(Resource.Type.RequestSchedule);
+      resource.setProperty(REQUEST_SCHEDULE_ID_PROPERTY_ID, response.getId());
+      associatedResources.add(resource);
+    }
+
+    return getRequestStatus(null, associatedResources);
+  }
+
+  @Override
+  public Set<Resource> getResources(Request request, Predicate predicate) throws
+      SystemException, UnsupportedPropertyException, NoSuchResourceException,
+      NoSuchParentResourceException {
+
+    final Set<RequestScheduleRequest> requests = new HashSet<RequestScheduleRequest>();
+    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
+      requests.add(getRequestScheduleRequest(propertyMap));
+    }
+
+    Set<RequestScheduleResponse> responses =
+      getResources(new Command<Set<RequestScheduleResponse>>() {
+        @Override
+        public Set<RequestScheduleResponse> invoke() throws AmbariException {
+          return getRequestSchedules(requests);
+        }
+      });
+
+    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
+    Set<Resource> resources    = new HashSet<Resource>();
+
+    for (RequestScheduleResponse response : responses) {
+      Resource resource = new ResourceImpl(Resource.Type.RequestSchedule);
+
+      setResourceProperty(resource, REQUEST_SCHEDULE_ID_PROPERTY_ID,
+        response.getId(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID,
+        response.getClusterName(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_DESC_PROPERTY_ID,
+        response.getDescription(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_STATUS_PROPERTY_ID,
+        response.getStatus(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_BATCH_PROPERTY_ID,
+        response.getBatch(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_SCHEDULE_PROPERTY_ID,
+        response.getSchedule(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_CREATE_USER_PROPERTY_ID,
+        response.getCreateUser(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_CREATE_TIME_PROPERTY_ID,
+        response.getCreateTime(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_UPDATE_USER_PROPERTY_ID,
+        response.getUpdateUser(), requestedIds);
+      setResourceProperty(resource, REQUEST_SCHEDULE_UPDATE_TIME_PROPERTY_ID,
+        response.getUpdateTime(), requestedIds);
+
+      resources.add(resource);
+    }
+
+    return resources;
+  }
+
+  @Override
+  public RequestStatus updateResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException,
+      NoSuchResourceException, NoSuchParentResourceException {
+
+    final Set<RequestScheduleRequest> requests =
+      new HashSet<RequestScheduleRequest>();
+
+    Iterator<Map<String,Object>> iterator = request.getProperties().iterator();
+    if (iterator.hasNext()) {
+      for (Map<String, Object> propertyMap : getPropertyMaps(iterator.next(), predicate)) {
+        requests.add(getRequestScheduleRequest(propertyMap));
+      }
+
+      modifyResources(new Command<Void>() {
+        @Override
+        public Void invoke() throws AmbariException {
+          updateRequestSchedule(requests);
+          return null;
+        }
+      });
+    }
+
+    notifyUpdate(Resource.Type.RequestSchedule, request, predicate);
+
+    return getRequestStatus(null);
+  }
+
+  @Override
+  public RequestStatus deleteResources(Predicate predicate) throws
+      SystemException, UnsupportedPropertyException, NoSuchResourceException,
+      NoSuchParentResourceException {
+
+    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
+      final RequestScheduleRequest requestScheduleRequest =
+        getRequestScheduleRequest(propertyMap);
+
+      modifyResources(new Command<Void>() {
+        @Override
+        public Void invoke() throws AmbariException {
+          deleteRequestSchedule(requestScheduleRequest);
+          return null;
+        }
+      });
+    }
+
+    notifyDelete(Resource.Type.RequestSchedule, predicate);
+
+    return getRequestStatus(null);
+  }
+
+  private synchronized void deleteRequestSchedule(RequestScheduleRequest request)
+    throws AmbariException {
+
+    if (request.getId() == null) {
+      throw new AmbariException("Id is a required field.");
+    }
+
+    Clusters clusters = getManagementController().getClusters();
+
+    Cluster cluster;
+    try {
+      cluster = clusters.getCluster(request.getClusterName());
+    } catch (ClusterNotFoundException e) {
+      throw new ParentObjectNotFoundException(
+        "Attempted to delete a request schedule from a cluster which doesn't "
+          + "exist", e);
+    }
+
+    LOG.info("Deleting Request Schedule "
+      + ", clusterName = " + request.getClusterName()
+      + ", id = " + request.getId());
+
+    cluster.deleteRequestExecution(request.getId());
+  }
+
+  private synchronized void updateRequestSchedule(
+      Set<RequestScheduleRequest> requests) throws AmbariException {
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty requests set");
+      return;
+    }
+
+    Clusters clusters = getManagementController().getClusters();
+
+    for (RequestScheduleRequest request : requests) {
+
+      validateRequest(request);
+
+      Cluster cluster;
+      try {
+        cluster = clusters.getCluster(request.getClusterName());
+      } catch (ClusterNotFoundException e) {
+        throw new ParentObjectNotFoundException(
+          "Attempted to add a request schedule to a cluster which doesn't " +
+            "exist", e);
+      }
+
+      if (request.getId() == null) {
+        throw new AmbariException("Id is a required parameter.");
+      }
+
+      RequestExecution requestExecution =
+        cluster.getAllRequestExecutions().get(request.getId());
+
+      if (requestExecution == null) {
+        throw new AmbariException("Request Schedule not found "
+          + ", clusterName = " + request.getClusterName()
+          + ", description = " + request.getDescription()
+          + ", id = " + request.getId());
+      }
+
+      String username = getManagementController().getAuthName();
+
+      requestExecution.setBatch(request.getBatch());
+      requestExecution.setDescription(request.getDescription());
+      requestExecution.setSchedule(request.getSchedule());
+      if (request.getStatus() != null &&
+          isValidRequestScheduleStatus(request.getStatus())) {
+        requestExecution.setStatus(RequestExecution.Status.valueOf(request.getStatus()));
+      }
+      requestExecution.setUpdateUser(username);
+
+      LOG.info("Persisting updated Request Schedule "
+        + ", clusterName = " + request.getClusterName()
+        + ", description = " + request.getDescription()
+        + ", user = " + username);
+
+      requestExecution.persist();
+    }
+  }
+
+  private synchronized Set<RequestScheduleResponse> createRequestSchedules(
+      Set<RequestScheduleRequest> requests) throws AmbariException {
+
+    if (requests.isEmpty()) {
+      LOG.warn("Received an empty requests set");
+      return null;
+    }
+
+    Set<RequestScheduleResponse> responses =
+      new HashSet<RequestScheduleResponse>();
+
+    Clusters clusters = getManagementController().getClusters();
+    RequestExecutionFactory requestExecutionFactory =
+      getManagementController().getRequestExecutionFactory();
+
+    for (RequestScheduleRequest request : requests) {
+
+      validateRequest(request);
+
+      Cluster cluster;
+      try {
+        cluster = clusters.getCluster(request.getClusterName());
+      } catch (ClusterNotFoundException e) {
+        throw new ParentObjectNotFoundException(
+          "Attempted to add a request schedule to a cluster which doesn't " +
+            "exist", e);
+      }
+
+      String username = getManagementController().getAuthName();
+
+      RequestExecution requestExecution = requestExecutionFactory.createNew(
+        cluster, request.getBatch(), request.getSchedule());
+
+      requestExecution.setCreateUser(username);
+      requestExecution.setUpdateUser(username);
+
+      LOG.info("Persisting new Request Schedule "
+        + ", clusterName = " + request.getClusterName()
+        + ", description = " + request.getDescription()
+        + ", user = " + username);
+
+      requestExecution.persist();
+      cluster.addRequestExecution(requestExecution);
+
+      RequestScheduleResponse response = new RequestScheduleResponse
+        (requestExecution.getId(), requestExecution.getClusterName(),
+          requestExecution.getDescription(), requestExecution.getStatus(),
+          requestExecution.getBatch(), request.getSchedule(),
+          requestExecution.getCreateUser(), requestExecution.getCreateTime(),
+          requestExecution.getUpdateUser(), requestExecution.getUpdateTime());
+
+      responses.add(response);
+    }
+
+    return responses;
+  }
+
+  private void validateRequest(RequestScheduleRequest request) {
+    if (request.getClusterName() == null) {
+      throw new IllegalArgumentException("Cluster name is required.");
+    }
+  }
+
+  private synchronized Set<RequestScheduleResponse> getRequestSchedules(
+      Set<RequestScheduleRequest> requests) throws AmbariException {
+
+    Set<RequestScheduleResponse> responses =
+      new HashSet<RequestScheduleResponse>();
+
+    if (requests != null) {
+      for (RequestScheduleRequest request : requests) {
+        if (request.getClusterName() == null) {
+          LOG.warn("Cluster name is a required field.");
+          continue;
+        }
+
+        Cluster cluster = getManagementController().getClusters().getCluster
+          (request.getClusterName());
+
+        Map<Long, RequestExecution> allRequestExecutions =
+          cluster.getAllRequestExecutions();
+
+        // Find by id
+        if (request.getId() != null) {
+          RequestExecution requestExecution = allRequestExecutions.get
+            (request.getId());
+          if (requestExecution != null) {
+            responses.add(requestExecution.convertToResponse());
+          }
+          continue;
+        }
+        // Find by status
+        if (request.getStatus() != null) {
+          for (RequestExecution requestExecution : allRequestExecutions.values()) {
+            if (requestExecution.getStatus().equals(request.getStatus())) {
+              responses.add(requestExecution.convertToResponse());
+            }
+          }
+          continue;
+        }
+        // TODO: Find by status of Batch Request(s) and start time greater than requested time
+
+        // Select all
+        for (RequestExecution requestExecution : allRequestExecutions.values()) {
+          responses.add(requestExecution.convertToResponse());
+        }
+      }
+    }
+
+    return responses;
+  }
+
+  private boolean isValidRequestScheduleStatus(String givenStatus) {
+    for (RequestExecution.Status status : RequestExecution.Status.values()) {
+      if (status.name().equalsIgnoreCase(givenStatus)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private RequestScheduleRequest getRequestScheduleRequest(Map<String, Object> properties) {
+    Object idObj = properties.get(REQUEST_SCHEDULE_ID_PROPERTY_ID);
+    Long id = null;
+    if (idObj != null) {
+      id = idObj instanceof Long ? (Long) idObj :
+        Long.parseLong((String) idObj);
+    }
+
+    RequestScheduleRequest requestScheduleRequest = new RequestScheduleRequest(
+      id,
+      (String) properties.get(REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID),
+      (String) properties.get(REQUEST_SCHEDULE_DESC_PROPERTY_ID),
+      (String) properties.get(REQUEST_SCHEDULE_STATUS_PROPERTY_ID),
+      null,
+      null);
+
+    Batch batch = new Batch();
+    BatchSettings batchSettings = new BatchSettings();
+    List<BatchRequest> batchRequests = new ArrayList<BatchRequest>();
+
+    Object batchObject = properties.get(REQUEST_SCHEDULE_BATCH_PROPERTY_ID);
+    if (batchObject instanceof HashSet<?>) {
+      try {
+        HashSet<Map<String, Object>> batchMap = (HashSet<Map<String, Object>>) batchObject;
+
+        for (Map<String, Object> batchEntry : batchMap) {
+          if (batchEntry != null) {
+            for (Map.Entry<String, Object> batchMapEntry : batchEntry.entrySet()) {
+              if (batchMapEntry.getKey().equals
+                  (REQUEST_SCHEDULE_BATCH_TOLERATION_PROPERTY_ID)) {
+                batchSettings.setTaskFailureToleranceLimit(Integer.valueOf
+                  ((String) batchMapEntry.getValue()));
+              } else if (batchMapEntry.getKey().equals
+                  (REQUEST_SCHEDULE_BATCH_SEPARATION_PROPERTY_ID)) {
+                batchSettings.setBatchSeparationInMinutes(Integer.valueOf
+                  ((String) batchMapEntry.getValue()));
+              } else if (batchMapEntry.getKey().equals
+                  (REQUEST_SCHEDULE_BATCH_REQUESTS_PROPERTY_ID)) {
+                HashSet<Map<String, Object>> requestSet =
+                  (HashSet<Map<String, Object>>) batchMapEntry.getValue();
+
+                for (Map<String, Object> requestEntry : requestSet) {
+                  if (requestEntry != null) {
+                    BatchRequest batchRequest = new BatchRequest();
+                    for (Map.Entry<String, Object> requestMapEntry :
+                        requestEntry.entrySet()) {
+                      if (requestMapEntry.getKey()
+                                 .equals(BATCH_REQUEST_TYPE_PROPERTY_ID)) {
+                        batchRequest.setType(BatchRequest.Type.valueOf
+                          ((String) requestMapEntry.getValue()));
+                      } else if (requestMapEntry.getKey()
+                                 .equals(BATCH_REQUEST_URI_PROPERTY_ID)) {
+                        batchRequest.setUri(
+                          (String) requestMapEntry.getValue());
+                      } else if (requestMapEntry.getKey()
+                                .equals(BATCH_REQUEST_ORDER_ID_PROPERTY_ID)) {
+                        batchRequest.setOrderId(Long.parseLong(
+                          (String) requestMapEntry.getValue()));
+                      } else if (requestMapEntry.getKey()
+                                .equals(BATCH_REQUEST_BODY_PROPERTY_ID)) {
+                        batchRequest.setBody(
+                          (String) requestMapEntry.getValue());
+                      }
+                    }
+                    batchRequests.add(batchRequest);
+                  }
+                }
+              }
+            }
+          }
+        }
+
+        batch.getBatchRequests().addAll(batchRequests);
+        batch.setBatchSettings(batchSettings);
+
+      } catch (Exception e) {
+        LOG.warn("Request Schedule batch json is unparseable. " +
+          batchObject, e);
+      }
+    }
+
+    requestScheduleRequest.setBatch(batch);
+
+    Schedule schedule = new Schedule();
+    for (Map.Entry<String, Object> propertyEntry : properties.entrySet()) {
+      if (propertyEntry.getKey().equals(SCHEDULE_DAY_OF_WEEK_PROPERTY_ID)) {
+        schedule.setDayOfWeek((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_DAYS_OF_MONTH_PROPERTY_ID)) {
+        schedule.setDaysOfMonth((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_END_TIME_PROPERTY_ID)) {
+        schedule.setEndTime((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_HOURS_PROPERTY_ID)) {
+        schedule.setHours((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_MINUTES_PROPERTY_ID)) {
+        schedule.setMinutes((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_MONTH_PROPERTY_ID)) {
+        schedule.setMonth((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_START_TIME_PROPERTY_ID)) {
+        schedule.setStartTime((String) propertyEntry.getValue());
+      } else if (propertyEntry.getKey().equals(SCHEDULE_YEAR_PROPERTY_ID)) {
+        schedule.setYear((String) propertyEntry.getValue());
+      }
+    }
+
+    if (!schedule.isEmpty()) {
+      requestScheduleRequest.setSchedule(schedule);
+    }
+
+    return requestScheduleRequest;
+  }
+}

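For orientation, here is a minimal sketch (not part of the patch) of the object graph that getRequestScheduleRequest(...) above assembles from the flattened property map: a RequestScheduleRequest carrying a Batch of ordered BatchRequests plus BatchSettings. The "POST" type value, URI, and body are illustrative assumptions; only setters that appear in the provider code are used.

    // Sketch only: mirrors what getRequestScheduleRequest(...) builds.
    // BatchRequest.Type.valueOf("POST") assumes the enum defines POST.
    BatchRequest first = new BatchRequest();
    first.setOrderId(1L);
    first.setType(BatchRequest.Type.valueOf("POST"));
    first.setUri("/api/v1/clusters/c1/requests");             // illustrative URI
    first.setBody("{\"RequestInfo\":{\"command\":\"RESTART\"}}");

    BatchSettings settings = new BatchSettings();
    settings.setBatchSeparationInMinutes(15);
    settings.setTaskFailureToleranceLimit(10);

    Batch batch = new Batch();
    batch.getBatchRequests().add(first);
    batch.setBatchSettings(settings);

    // Constructor args: id, clusterName, description, status, batch, schedule
    RequestScheduleRequest request = new RequestScheduleRequest(
      null, "c1", "Rolling restart batches", null, batch, null);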
+ 1 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java

@@ -79,6 +79,7 @@ public interface Resource {
     ConfigGroup,
     Action,
     Request,
+    RequestSchedule,
     Task,
     User,
     Stack,

+ 34 - 23
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchHostDAO.java → ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleBatchRequestDAO.java

@@ -21,32 +21,33 @@ import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity;
-import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntityPK;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntityPK;
 import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
 import javax.persistence.TypedQuery;
 import java.util.List;
 
 @Singleton
-public class RequestScheduleBatchHostDAO {
+public class RequestScheduleBatchRequestDAO {
   @Inject
   Provider<EntityManager> entityManagerProvider;
   @Inject
   DaoUtils daoUtils;
 
   @Transactional
-  public RequestScheduleBatchHostEntity findByPK
-    (RequestScheduleBatchHostEntityPK requestScheduleBatchHostEntityPK) {
-    return entityManagerProvider.get().find(RequestScheduleBatchHostEntity
-      .class, requestScheduleBatchHostEntityPK);
+  public RequestScheduleBatchRequestEntity findByPk
+    (RequestScheduleBatchRequestEntityPK batchRequestEntityPK) {
+
+    return entityManagerProvider.get()
+      .find(RequestScheduleBatchRequestEntity.class, batchRequestEntityPK);
   }
 
   @Transactional
-  public List<RequestScheduleBatchHostEntity> findBySchedule(Long scheduleId) {
-    TypedQuery<RequestScheduleBatchHostEntity> query = entityManagerProvider
-      .get().createNamedQuery("batchHostsBySchedule",
-        RequestScheduleBatchHostEntity.class);
+  public List<RequestScheduleBatchRequestEntity> findByScheduleId(Long scheduleId) {
+    TypedQuery<RequestScheduleBatchRequestEntity> query = entityManagerProvider
+      .get().createNamedQuery("findByScheduleId",
+        RequestScheduleBatchRequestEntity.class);
 
     query.setParameter("id", scheduleId);
     try {
@@ -57,32 +58,42 @@ public class RequestScheduleBatchHostDAO {
   }
 
   @Transactional
-  public void create(RequestScheduleBatchHostEntity batchHostEntity) {
-    entityManagerProvider.get().persist(batchHostEntity);
+  public void create(RequestScheduleBatchRequestEntity batchRequestEntity) {
+    entityManagerProvider.get().persist(batchRequestEntity);
+  }
+
+  @Transactional
+  public RequestScheduleBatchRequestEntity merge
+    (RequestScheduleBatchRequestEntity batchRequestEntity) {
+
+    return entityManagerProvider.get().merge(batchRequestEntity);
   }
 
   @Transactional
-  public RequestScheduleBatchHostEntity merge(RequestScheduleBatchHostEntity batchHostEntity) {
-    return entityManagerProvider.get().merge(batchHostEntity);
+  public void refresh(RequestScheduleBatchRequestEntity batchRequestEntity) {
+    entityManagerProvider.get().refresh(batchRequestEntity);
   }
 
   @Transactional
-  public void refresh(RequestScheduleBatchHostEntity batchHostEntity) {
-    entityManagerProvider.get().refresh(batchHostEntity);
+  public void remove(RequestScheduleBatchRequestEntity batchRequestEntity) {
+    entityManagerProvider.get().remove(merge(batchRequestEntity));
   }
 
   @Transactional
-  public void remove(RequestScheduleBatchHostEntity batchHostEntity) {
-    entityManagerProvider.get().remove(batchHostEntity);
+  public void removeByPk(RequestScheduleBatchRequestEntityPK
+                             batchRequestEntityPK) {
+    entityManagerProvider.get().remove(findByPk(batchRequestEntityPK));
   }
 
   @Transactional
-  public void removeBySchedule(Long scheduleId) {
-    TypedQuery<Long> query = entityManagerProvider.get().createQuery(
-      "DELETE FROM RequestScheduleBatchHostEntity batchHosts WHERE " +
-        "batchHosts.scheduleId = ?1", Long.class);
+  public void removeByScheduleId(Long scheduleId) {
+    TypedQuery<Long> query = entityManagerProvider.get().createQuery
+      ("DELETE FROM RequestScheduleBatchRequestEntity batchreq WHERE " +
+        "batchreq.scheduleId = ?1", Long.class);
 
     daoUtils.executeUpdate(query, scheduleId);
+    // Flush within the current transaction so that EclipseLink does not
+    // re-order the bulk delete relative to other pending operations
     entityManagerProvider.get().flush();
   }
 }

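A short usage sketch of the renamed DAO (illustrative, not part of the patch): replaceBatchRequests and its injection site are hypothetical, but the DAO calls match the methods above.

    @Inject
    private RequestScheduleBatchRequestDAO batchRequestDAO;

    public void replaceBatchRequests(Long scheduleId,
        List<RequestScheduleBatchRequestEntity> newEntities) {
      // Bulk delete first; the DAO flushes so EclipseLink cannot re-order
      // the delete after the inserts below.
      batchRequestDAO.removeByScheduleId(scheduleId);
      for (RequestScheduleBatchRequestEntity entity : newEntities) {
        batchRequestDAO.create(entity);
      }
    }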
+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestScheduleDAO.java

@@ -73,7 +73,7 @@ public class RequestScheduleDAO {
 
   @Transactional
   public void remove(RequestScheduleEntity requestScheduleEntity) {
-    entityManagerProvider.get().remove(requestScheduleEntity);
+    entityManagerProvider.get().remove(merge(requestScheduleEntity));
   }
 
   @Transactional

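The one-line change above applies the standard JPA pattern for deleting a detached entity: merge() re-attaches the instance to the current persistence context so that remove() does not fail with an IllegalArgumentException. In isolation the pattern looks like this (a generic sketch, not Ambari code):

    // Generic merge-then-remove for a detached JPA entity.
    public <T> void removeDetached(EntityManager em, T detached) {
      T managed = em.merge(detached); // re-attach to the persistence context
      em.remove(managed);             // safe: remove() requires a managed instance
    }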
+ 69 - 30
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntity.java → ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchRequestEntity.java

@@ -20,6 +20,7 @@ package org.apache.ambari.server.orm.entities;
 import javax.persistence.Column;
 import javax.persistence.Entity;
 import javax.persistence.Id;
+import javax.persistence.IdClass;
 import javax.persistence.JoinColumn;
 import javax.persistence.JoinColumns;
 import javax.persistence.ManyToOne;
@@ -27,15 +28,14 @@ import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 
-@Table(name = "requestschedulebatchhost")
+@IdClass(RequestScheduleBatchRequestEntityPK.class)
 @Entity
+@Table(name = "requestschedulebatchrequest")
 @NamedQueries({
-  @NamedQuery(name = "batchHostsBySchedule", query =
-    "SELECT batchHost FROM RequestScheduleBatchHostEntity batchHost " +
-      "WHERE batchHost.scheduleId=:id")
+  @NamedQuery(name = "findByScheduleId", query = "SELECT batchreqs FROM " +
+    "RequestScheduleBatchRequestEntity  batchreqs WHERE batchreqs.scheduleId=:id")
 })
-public class RequestScheduleBatchHostEntity {
-
+public class RequestScheduleBatchRequestEntity {
   @Id
   @Column(name = "schedule_id", nullable = false, insertable = true, updatable = true)
   private Long scheduleId;
@@ -44,17 +44,26 @@ public class RequestScheduleBatchHostEntity {
   @Column(name = "batch_id", nullable = false, insertable = true, updatable = true)
   private Long batchId;
 
-  @Id
-  @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
-  private String hostName;
+  @Column(name = "request_id")
+  private Long requestId;
 
-  @Column(name = "batch_name")
-  private String batchName;
+  @Column(name = "request_type")
+  private String requestType;
 
-  @ManyToOne
-  @JoinColumns({
-    @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false, insertable = false, updatable = false) })
-  private HostEntity hostEntity;
+  @Column(name = "request_uri")
+  private String requestUri;
+
+  @Column(name = "request_body")
+  private String requestBody;
+
+  @Column(name = "request_status")
+  private String requestStatus;
+
+  @Column(name = "return_code")
+  private Integer returnCode;
+
+  @Column(name = "return_message")
+  private String returnMessage;
 
   @ManyToOne
   @JoinColumns({
@@ -77,28 +86,60 @@ public class RequestScheduleBatchHostEntity {
     this.batchId = batchId;
   }
 
-  public String getHostName() {
-    return hostName;
+  public Long getRequestId() {
+    return requestId;
+  }
+
+  public void setRequestId(Long requestId) {
+    this.requestId = requestId;
+  }
+
+  public String getRequestType() {
+    return requestType;
+  }
+
+  public void setRequestType(String requestType) {
+    this.requestType = requestType;
+  }
+
+  public String getRequestUri() {
+    return requestUri;
+  }
+
+  public void setRequestUri(String requestUri) {
+    this.requestUri = requestUri;
+  }
+
+  public String getRequestBody() {
+    return requestBody;
+  }
+
+  public void setRequestBody(String requestBody) {
+    this.requestBody = requestBody;
+  }
+
+  public String getRequestStatus() {
+    return requestStatus;
   }
 
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
+  public void setRequestStatus(String requestStatus) {
+    this.requestStatus = requestStatus;
   }
 
-  public String getBatchName() {
-    return batchName;
+  public Integer getReturnCode() {
+    return returnCode;
   }
 
-  public void setBatchName(String batchName) {
-    this.batchName = batchName;
+  public void setReturnCode(Integer returnCode) {
+    this.returnCode = returnCode;
   }
 
-  public HostEntity getHostEntity() {
-    return hostEntity;
+  public String getReturnMessage() {
+    return returnMessage;
   }
 
-  public void setHostEntity(HostEntity hostEntity) {
-    this.hostEntity = hostEntity;
+  public void setReturnMessage(String returnMessage) {
+    this.returnMessage = returnMessage;
   }
 
   public RequestScheduleEntity getRequestScheduleEntity() {
@@ -114,10 +155,9 @@ public class RequestScheduleBatchHostEntity {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
 
-    RequestScheduleBatchHostEntity that = (RequestScheduleBatchHostEntity) o;
+    RequestScheduleBatchRequestEntity that = (RequestScheduleBatchRequestEntity) o;
 
     if (!batchId.equals(that.batchId)) return false;
-    if (!hostName.equals(that.hostName)) return false;
     if (!scheduleId.equals(that.scheduleId)) return false;
 
     return true;
@@ -127,7 +167,6 @@ public class RequestScheduleBatchHostEntity {
   public int hashCode() {
     int result = scheduleId.hashCode();
     result = 31 * result + batchId.hashCode();
-    result = 31 * result + hostName.hashCode();
     return result;
   }
 }

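With @IdClass in place, a single row is addressed by the (scheduleId, batchId) pair. A lookup sketch under that assumption, using the DAO from earlier in this patch:

    // Sketch: resolve one batch request via its composite key.
    RequestScheduleBatchRequestEntityPK pk = new RequestScheduleBatchRequestEntityPK();
    pk.setScheduleId(scheduleId);
    pk.setBatchId(batchId);
    RequestScheduleBatchRequestEntity entity = batchRequestDAO.findByPk(pk);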
+ 3 - 17
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchHostEntityPK.java → ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleBatchRequestEntityPK.java

@@ -17,15 +17,13 @@
  */
 package org.apache.ambari.server.orm.entities;
 
-
 import javax.persistence.Column;
 import javax.persistence.Id;
 import java.io.Serializable;
 
-public class RequestScheduleBatchHostEntityPK implements Serializable {
+public class RequestScheduleBatchRequestEntityPK implements Serializable {
   private Long scheduleId;
-  private Long  batchId;
-  private String hostName;
+  private Long batchId;
 
   @Id
   @Column(name = "schedule_id", nullable = false, insertable = true, updatable = true)
@@ -47,25 +45,14 @@ public class RequestScheduleBatchHostEntityPK implements Serializable {
     this.batchId = batchId;
   }
 
-  @Id
-  @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
 
-    RequestScheduleBatchHostEntityPK that = (RequestScheduleBatchHostEntityPK) o;
+    RequestScheduleBatchRequestEntityPK that = (RequestScheduleBatchRequestEntityPK) o;
 
     if (!batchId.equals(that.batchId)) return false;
-    if (!hostName.equals(that.hostName)) return false;
     if (!scheduleId.equals(that.scheduleId)) return false;
 
     return true;
@@ -75,7 +62,6 @@ public class RequestScheduleBatchHostEntityPK implements Serializable {
   public int hashCode() {
     int result = scheduleId.hashCode();
     result = 31 * result + batchId.hashCode();
-    result = 31 * result + hostName.hashCode();
     return result;
   }
 }

+ 19 - 83
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestScheduleEntity.java

@@ -50,30 +50,12 @@ public class RequestScheduleEntity {
   @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
   private Long clusterId;
 
-  @Column(name = "request_context")
-  private String requestContext;
+  @Column(name = "description")
+  private String description;
 
   @Column(name = "status")
   private String status;
 
-  @Column(name = "target_type")
-  private String targetType;
-
-  @Column(name = "target_name")
-  private String targetName;
-
-  @Column(name = "target_service")
-  private String targetService;
-
-  @Column(name = "target_component")
-  private String targetComponent;
-
-  @Column(name = "batch_requests_by_host")
-  private boolean batchRequestByHost;
-
-  @Column(name = "batch_host_count")
-  private Integer batchHostCount;
-
   @Column(name = "batch_separation_minutes")
   private Integer batchSeparationInMinutes;
 
@@ -111,10 +93,10 @@ public class RequestScheduleEntity {
   private String year;
 
   @Column(name = "starttime")
-  private Long startTime;
+  private String startTime;
 
   @Column(name = "endtime")
-  private Long endTime;
+  private String endTime;
 
   @Column(name = "last_execution_status")
   private String lastExecutionStatus;
@@ -124,7 +106,8 @@ public class RequestScheduleEntity {
   private ClusterEntity clusterEntity;
 
   @OneToMany(mappedBy = "requestScheduleEntity", cascade = CascadeType.ALL)
-  private Collection<RequestScheduleBatchHostEntity> requestScheduleBatchHostEntities;
+  private Collection<RequestScheduleBatchRequestEntity>
+    requestScheduleBatchRequestEntities;
 
   public long getScheduleId() {
     return scheduleId;
@@ -142,12 +125,12 @@ public class RequestScheduleEntity {
     this.clusterId = clusterId;
   }
 
-  public String getRequestContext() {
-    return requestContext;
+  public String getDescription() {
+    return description;
   }
 
-  public void setRequestContext(String request_context) {
-    this.requestContext = request_context;
+  public void setDescription(String description) {
+    this.description = description;
   }
 
   public String getStatus() {
@@ -158,54 +141,6 @@ public class RequestScheduleEntity {
     this.status = status;
   }
 
-  public String getTargetType() {
-    return targetType;
-  }
-
-  public void setTargetType(String targetType) {
-    this.targetType = targetType;
-  }
-
-  public String getTargetName() {
-    return targetName;
-  }
-
-  public void setTargetName(String targetName) {
-    this.targetName = targetName;
-  }
-
-  public String getTargetService() {
-    return targetService;
-  }
-
-  public void setTargetService(String targetService) {
-    this.targetService = targetService;
-  }
-
-  public String getTargetComponent() {
-    return targetComponent;
-  }
-
-  public void setTargetComponent(String targetComponent) {
-    this.targetComponent = targetComponent;
-  }
-
-  public boolean getIsBatchRequestByHost() {
-    return batchRequestByHost;
-  }
-
-  public void setBatchRequestByHost(boolean batchRequestByHost) {
-    this.batchRequestByHost = batchRequestByHost;
-  }
-
-  public Integer getBatchHostCount() {
-    return batchHostCount;
-  }
-
-  public void setBatchHostCount(Integer batchHostCount) {
-    this.batchHostCount = batchHostCount;
-  }
-
   public Integer getBatchSeparationInMinutes() {
     return batchSeparationInMinutes;
   }
@@ -302,19 +237,19 @@ public class RequestScheduleEntity {
     this.year = year;
   }
 
-  public Long getStartTime() {
+  public String getStartTime() {
     return startTime;
   }
 
-  public void setStartTime(Long startTime) {
+  public void setStartTime(String startTime) {
     this.startTime = startTime;
   }
 
-  public Long getEndTime() {
+  public String getEndTime() {
     return endTime;
   }
 
-  public void setEndTime(Long endTime) {
+  public void setEndTime(String endTime) {
     this.endTime = endTime;
   }
 
@@ -334,12 +269,13 @@ public class RequestScheduleEntity {
     this.clusterEntity = clusterEntity;
   }
 
-  public Collection<RequestScheduleBatchHostEntity> getRequestScheduleBatchHostEntities() {
-    return requestScheduleBatchHostEntities;
+  public Collection<RequestScheduleBatchRequestEntity> getRequestScheduleBatchRequestEntities() {
+    return requestScheduleBatchRequestEntities;
   }
 
-  public void setRequestScheduleBatchHostEntities(Collection<RequestScheduleBatchHostEntity> requestScheduleBatchHostEntities) {
-    this.requestScheduleBatchHostEntities = requestScheduleBatchHostEntities;
+  public void setRequestScheduleBatchRequestEntities(
+    Collection<RequestScheduleBatchRequestEntity> requestScheduleBatchRequestEntities) {
+    this.requestScheduleBatchRequestEntities = requestScheduleBatchRequestEntities;
   }
 
   @Override

+ 103 - 0
ambari-server/src/main/java/org/apache/ambari/server/scheduler/AbstractLinearExecutionJob.java

@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.scheduler;
+
+import org.apache.ambari.server.AmbariException;
+import org.quartz.DateBuilder;
+import org.quartz.DisallowConcurrentExecution;
+import org.quartz.JobDataMap;
+import org.quartz.JobExecutionContext;
+import org.quartz.JobExecutionException;
+import org.quartz.JobKey;
+import org.quartz.PersistJobDataAfterExecution;
+import org.quartz.Trigger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import static org.quartz.DateBuilder.futureDate;
+import static org.quartz.SimpleScheduleBuilder.simpleSchedule;
+import static org.quartz.TriggerBuilder.newTrigger;
+
+/**
+ * Job that knows how to get the job name and group out of the JobDataMap using
+ * pre-defined keys (constants) and contains code to schedule the identified job.
+ * This abstract Job's implementation of execute() delegates to an abstract
+ * template method "doWork()" (where the extending Job class's real work goes)
+ * and then it schedules the follow-up job.
+ */
+@PersistJobDataAfterExecution
+@DisallowConcurrentExecution
+public abstract class AbstractLinearExecutionJob implements ExecutionJob {
+  private ExecutionScheduleManager executionScheduleManager;
+  private static Logger LOG = LoggerFactory.getLogger(AbstractLinearExecutionJob.class);
+
+  public AbstractLinearExecutionJob(ExecutionScheduleManager executionScheduleManager) {
+    this.executionScheduleManager = executionScheduleManager;
+  }
+
+  /**
+   * Do the actual work of the fired job.
+   * @throws AmbariException
+   */
+  protected abstract void doWork() throws AmbariException;
+
+  /**
+   * Get the next job id from context and create a trigger to fire the next
+   * job.
+   * @param context
+   * @throws JobExecutionException
+   */
+  @Override
+  public void execute(JobExecutionContext context) throws JobExecutionException {
+    JobKey jobKey = context.getJobDetail().getKey();
+    LOG.debug("Executing linear job: " + jobKey);
+
+    if (!executionScheduleManager.continueOnMisfire(context)) {
+      throw new JobExecutionException("Canceled execution based on misfire"
+        + " toleration threshold, job: " + jobKey
+        + ", scheduleTime = " + context.getScheduledFireTime());
+    }
+
+    // Perform work and exit if failure reported
+    try {
+      doWork();
+    } catch (AmbariException e) {
+      LOG.error("Exception caught on job execution. Exiting linear chain...", e);
+      throw new JobExecutionException(e);
+    }
+
+    JobDataMap jobDataMap = context.getMergedJobDataMap();
+    String nextJobName = jobDataMap.getString(NEXT_EXECUTION_JOB_NAME_KEY);
+    String nextJobGroup = jobDataMap.getString(NEXT_EXECUTION_JOB_GROUP_KEY);
+    Integer separationMinutes = jobDataMap.getIntegerFromString(
+      NEXT_EXECUTION_SEPARATION_MINUTES);
+
+    if (separationMinutes == null) {
+      separationMinutes = 0;
+    }
+
+    // Create trigger for next job execution
+    Trigger trigger = newTrigger()
+      .forJob(nextJobName, nextJobGroup)
+      .withIdentity("TriggerForJob-" + nextJobName, LINEAR_EXECUTION_TRIGGER_GROUP)
+      .withSchedule(simpleSchedule().withMisfireHandlingInstructionFireNow())
+      .startAt(futureDate(separationMinutes, DateBuilder.IntervalUnit.MINUTE))
+      .build();
+
+    executionScheduleManager.scheduleJob(trigger);
+  }
+}

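A hypothetical subclass (not part of the patch) showing the intended extension point: doWork() carries the step's real logic, while the inherited execute() handles the misfire check and schedules the follow-up job.

    package org.apache.ambari.server.scheduler;

    import org.apache.ambari.server.AmbariException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingExecutionJob extends AbstractLinearExecutionJob {
      private static final Logger LOG =
        LoggerFactory.getLogger(LoggingExecutionJob.class);

      public LoggingExecutionJob(ExecutionScheduleManager executionScheduleManager) {
        super(executionScheduleManager);
      }

      @Override
      protected void doWork() throws AmbariException {
        // A real subclass would fire its batch request here; this one only logs.
        LOG.info("Linear execution step ran.");
      }
    }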
+ 34 - 0
ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java

@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.scheduler;
+
+import org.quartz.Job;
+
+/**
+ * Type of Quartz Job that can be executed by the @ExecutionScheduleManager
+ */
+public interface ExecutionJob extends Job {
+  public static final String NEXT_EXECUTION_JOB_NAME_KEY = "ExecutionJob.Name";
+  public static final String NEXT_EXECUTION_JOB_GROUP_KEY = "ExecutionJob.Group";
+  public static final String NEXT_EXECUTION_SEPARATION_MINUTES =
+    "ExecutionJob.SeparationMinutes";
+  public static final String LINEAR_EXECUTION_JOB_GROUP =
+    "LinearExecutionJobs";
+  public static final String LINEAR_EXECUTION_TRIGGER_GROUP =
+    "LinearExecutionTriggers";
+}

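A sketch of how a caller might chain jobs with these keys (LoggingExecutionJob is the hypothetical subclass sketched above); AbstractLinearExecutionJob reads the same keys back out of the merged JobDataMap to build the trigger for its successor:

    import static org.apache.ambari.server.scheduler.ExecutionJob.*;

    import org.quartz.JobBuilder;
    import org.quartz.JobDetail;

    JobDetail firstJob = JobBuilder.newJob(LoggingExecutionJob.class)
      .withIdentity("batch-1", LINEAR_EXECUTION_JOB_GROUP)
      .usingJobData(NEXT_EXECUTION_JOB_NAME_KEY, "batch-2")
      .usingJobData(NEXT_EXECUTION_JOB_GROUP_KEY, LINEAR_EXECUTION_JOB_GROUP)
      .usingJobData(NEXT_EXECUTION_SEPARATION_MINUTES, "15")
      .storeDurably() // durable so the chained trigger can reference it later
      .build();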
+ 103 - 0
ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduleManager.java

@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.scheduler;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.utils.DateUtils;
+import org.quartz.Job;
+import org.quartz.JobExecutionContext;
+import org.quartz.SchedulerException;
+import org.quartz.Trigger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Date;
+
+/**
+ * This class handles scheduling request execution for managed clusters
+ */
+@Singleton
+public class ExecutionScheduleManager {
+  private static final Logger LOG = LoggerFactory.getLogger
+    (ExecutionScheduleManager.class);
+  @Inject
+  private ExecutionScheduler executionScheduler;
+  @Inject
+  private Configuration configuration;
+
+  private volatile boolean schedulerAvailable = false;
+
+  @Inject
+  public ExecutionScheduleManager(Injector injector) {
+    injector.injectMembers(this);
+  }
+
+  public void start() {
+    LOG.info("Starting scheduler");
+    try {
+      executionScheduler.startScheduler();
+      schedulerAvailable = true;
+    } catch (AmbariException e) {
+      LOG.warn("Unable to start scheduler. No recurring tasks will be " +
+        "scheduled.");
+    }
+  }
+
+  public void stop() {
+    LOG.info("Stopping scheduler");
+    schedulerAvailable = false;
+    try {
+      executionScheduler.stopScheduler();
+    } catch (AmbariException e) {
+      LOG.warn("Unable to stop scheduler. No new recurring tasks will be " +
+        "scheduled.");
+    }
+  }
+
+  public boolean isSchedulerAvailable() {
+    return schedulerAvailable;
+  }
+
+  public void scheduleJob(Trigger trigger) {
+    LOG.debug("Scheduling job: " + trigger.getJobKey());
+    if (isSchedulerAvailable()) {
+      try {
+        executionScheduler.scheduleJob(trigger);
+      } catch (SchedulerException e) {
+        LOG.error("Unable to add trigger for execution job: " + trigger
+          .getJobKey(), e);
+      }
+    } else {
+      LOG.error("Scheduler unavailable, cannot schedule jobs.");
+    }
+  }
+
+  public boolean continueOnMisfire(JobExecutionContext jobExecutionContext) {
+    if (jobExecutionContext != null) {
+      Date scheduledTime = jobExecutionContext.getScheduledFireTime();
+      Long diff = DateUtils.getDateDifferenceInMinutes(scheduledTime);
+      return (diff < configuration.getExecutionSchedulerMisfireToleration());
+    }
+    return true;
+  }
+}

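Lifecycle sketch (illustrative; the real wiring lives in AmbariServer, changed earlier in this patch): start the manager once at server startup, hand it triggers while it is available, and stop it at shutdown. The injector reference is an assumption about the Guice setup.

    ExecutionScheduleManager manager =
      injector.getInstance(ExecutionScheduleManager.class); // Guice-provided
    manager.start();
    if (manager.isSchedulerAvailable()) {
      manager.scheduleJob(trigger); // e.g. a trigger built by AbstractLinearExecutionJob
    }
    // ... on server shutdown:
    manager.stop();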
+ 52 - 0
ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.scheduler;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.Schedule;
+import org.quartz.Job;
+import org.quartz.JobKey;
+import org.quartz.SchedulerException;
+import org.quartz.Trigger;
+
+public interface ExecutionScheduler {
+  /**
+   * Initialize and start the scheduler to accept jobs.
+   * @throws AmbariException
+   */
+  public void startScheduler() throws AmbariException;
+
+  /**
+   * Shutdown the scheduler threads and do not accept any more jobs.
+   * @throws AmbariException
+   */
+  public void stopScheduler() throws AmbariException;
+
+  /**
+   * Create a job based on the @RequestExecution and add a trigger for the
+   * created job based on the @Schedule. Schedule the job with the scheduler.
+   * @param requestExecution
+   * @param schedule
+   * @throws AmbariException
+   */
+  public void scheduleJob(RequestExecution requestExecution,
+                          Schedule schedule) throws AmbariException;
+
+  public void scheduleJob(Trigger trigger) throws SchedulerException;
+}

+ 170 - 0
ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java

@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.scheduler;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.Schedule;
+import org.quartz.Job;
+import org.quartz.JobKey;
+import org.quartz.Scheduler;
+import org.quartz.SchedulerException;
+import org.quartz.Trigger;
+import org.quartz.impl.StdSchedulerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Properties;
+
+@Singleton
+public class ExecutionSchedulerImpl implements ExecutionScheduler {
+  @Inject
+  private Configuration configuration;
+  private Scheduler scheduler;
+  private static final Logger LOG = LoggerFactory.getLogger(ExecutionSchedulerImpl.class);
+  protected static final String DEFAULT_SCHEDULER_NAME = "ExecutionScheduler";
+  private static volatile boolean isInitialized = false;
+
+  @Inject
+  public ExecutionSchedulerImpl(Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  protected synchronized void initializeScheduler() {
+    StdSchedulerFactory sf = new StdSchedulerFactory();
+    Properties properties = getQuartzSchedulerProperties();
+    try {
+      sf.initialize(properties);
+    } catch (SchedulerException e) {
+      LOG.warn("Failed to initialize Request Execution Scheduler properties !");
+      LOG.debug("Scheduler properties: \n" + properties);
+      e.printStackTrace();
+      return;
+    }
+    try {
+      scheduler = sf.getScheduler();
+      isInitialized = true;
+    } catch (SchedulerException e) {
+      LOG.warn("Failed to create Request Execution scheduler !");
+      e.printStackTrace();
+    }
+  }
+
+  protected Properties getQuartzSchedulerProperties() {
+    Properties properties = new Properties();
+    properties.setProperty("org.quartz.scheduler.instanceName", DEFAULT_SCHEDULER_NAME);
+    properties.setProperty("org.quartz.scheduler.instanceId", "AUTO");
+    properties.setProperty("org.quartz.threadPool.class",
+      "org.quartz.simpl.SimpleThreadPool");
+    properties.setProperty("org.quartz.threadPool.threadCount",
+      configuration.getExecutionSchedulerThreads());
+
+    // Job Store Configuration
+    properties.setProperty("org.quartz.jobStore.class",
+      "org.quartz.impl.jdbcjobstore.JobStoreTX");
+    properties.setProperty("org.quartz.jobStore.isClustered",
+      configuration.isExecutionSchedulerClusterd());
+
+    String dbType = configuration.getServerDBName();
+    String dbDelegate = "org.quartz.impl.jdbcjobstore.StdJDBCDelegate";
+    String dbValidate = "select 0";
+
+    if (dbType.equals(Configuration.SERVER_DB_NAME_DEFAULT)) {
+      dbDelegate = "org.quartz.impl.jdbcjobstore.PostgreSQLDelegate";
+    } else if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
+      dbDelegate = "org.quartz.impl.jdbcjobstore.oracle.OracleDelegate";
+      dbValidate = "select 0 from dual";
+    }
+    properties.setProperty("org.quartz.jobStore.driverDelegateClass", dbDelegate);
+    // useProperties = false permits arbitrary serializable objects, not just
+    // strings, in the serialized JobDataMap
+    properties.setProperty("org.quartz.jobStore.useProperties", "false");
+
+    // Data store configuration
+    properties.setProperty("org.quartz.jobStore.dataSource", "myDS");
+    properties.setProperty("org.quartz.dataSource.myDS.driver",
+      configuration.getDatabaseDriver());
+    properties.setProperty("org.quartz.dataSource.myDS.URL",
+      configuration.getDatabaseUrl());
+    properties.setProperty("org.quartz.dataSource.myDS.user",
+      configuration.getDatabaseUser());
+    properties.setProperty("org.quartz.dataSource.myDS.password",
+      configuration.getDatabasePassword());
+    properties.setProperty("org.quartz.dataSource.myDS.maxConnections",
+      configuration.getExecutionSchedulerConnections());
+    properties.setProperty("org.quartz.dataSource.myDS.validationQuery",
+      dbValidate);
+
+    // Skip update check
+    properties.setProperty("org.quartz.scheduler.skipUpdateCheck", "true");
+
+    return properties;
+  }
+
+  protected synchronized boolean isInitialized() {
+    return isInitialized;
+  }
+
+  @Override
+  public synchronized void startScheduler() throws AmbariException {
+    try {
+      if (!isInitialized) {
+        initializeScheduler();
+      }
+    } catch (Exception e) {
+      String msg = "Unable to initialize Request Execution scheduler!";
+      LOG.warn(msg, e);
+      throw new AmbariException(msg);
+    }
+    if (!isInitialized || scheduler == null) {
+      throw new AmbariException("Request Execution scheduler failed to initialize.");
+    }
+    try {
+      scheduler.start();
+    } catch (SchedulerException e) {
+      LOG.error("Failed to start scheduler", e);
+      throw new AmbariException(e.getMessage());
+    }
+  }
+
+  @Override
+  public synchronized void stopScheduler() throws AmbariException {
+    if (scheduler == null) {
+      throw new AmbariException("Scheduler not instantiated !");
+    }
+    try {
+      scheduler.shutdown();
+    } catch (SchedulerException e) {
+      LOG.error("Failed to stop scheduler", e);
+      throw new AmbariException(e.getMessage());
+    }
+  }
+
+  @Override
+  public void scheduleJob(RequestExecution requestExecution, Schedule schedule)
+      throws AmbariException {
+
+  }
+
+  @Override
+  public void scheduleJob(Trigger trigger) throws SchedulerException {
+    scheduler.scheduleJob(trigger);
+  }
+
+}

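For a default Postgres-backed server, getQuartzSchedulerProperties() above resolves to roughly the following (values illustrative; the real ones come from ambari.properties via Configuration). Since the method is protected, a same-package test could assert on it:

    // Same-package sketch; 'configuration' is an injected Configuration.
    Properties p = new ExecutionSchedulerImpl(configuration)
      .getQuartzSchedulerProperties();
    // Expected shape, e.g.:
    //   org.quartz.scheduler.instanceName       = ExecutionScheduler
    //   org.quartz.jobStore.class               = org.quartz.impl.jdbcjobstore.JobStoreTX
    //   org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
    //   org.quartz.dataSource.myDS.validationQuery = select 0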
+ 21 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java

@@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReadWriteLock;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ClusterResponse;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
 
 public interface Cluster {
 
@@ -255,4 +256,24 @@ public interface Cluster {
    * @return Map of config group id to config group
    */
   public Map<Long, ConfigGroup> getConfigGroupsByHostname(String hostname) throws AmbariException;
+
+  /**
+   * Add a @RequestExecution to the cluster
+   * @param requestExecution
+   * @throws AmbariException
+   */
+  public void addRequestExecution(RequestExecution requestExecution) throws AmbariException;
+
+  /**
+   * Get all @RequestExecution objects associated with the cluster
+   * @return Map of request schedule id to @RequestExecution
+   */
+  public Map<Long, RequestExecution> getAllRequestExecutions();
+
+  /**
+   * Delete a @RequestExecution associated with the cluster
+   * @param id
+   * @throws AmbariException
+   */
+  public void deleteRequestExecution(Long id) throws AmbariException;
 }

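Consumer-side sketch of the new methods (illustrative; RequestScheduleResourceProvider is the intended caller). Only accessors shown elsewhere in this patch are used; 'clusters' and 'scheduleId' are assumed to be in scope:

    Cluster cluster = clusters.getCluster("c1");

    for (RequestExecution execution :
        cluster.getAllRequestExecutions().values()) {
      LOG.info("Request schedule, id = " + execution.getId()
        + ", description = " + execution.getDescription());
    }

    cluster.deleteRequestExecution(scheduleId); // throws AmbariException if absent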
+ 223 - 116
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -39,6 +39,7 @@ import org.apache.ambari.server.orm.entities.ClusterStateEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity;
 import org.apache.ambari.server.orm.entities.HostConfigMappingEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -51,8 +52,11 @@ import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import javax.persistence.RollbackException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -65,15 +69,15 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 public class ClusterImpl implements Cluster {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(ClusterImpl.class);
+    LoggerFactory.getLogger(ClusterImpl.class);
 
   @Inject
   private Clusters clusters;
@@ -86,24 +90,29 @@ public class ClusterImpl implements Cluster {
    * [ Config Type -> [ Config Version Tag -> Config ] ]
    */
   private Map<String, Map<String, Config>> allConfigs;
-  
+
   /**
    * [ ServiceName -> [ ServiceComponentName -> [ HostName -> [ ... ] ] ] ]
    */
   private Map<String, Map<String, Map<String, ServiceComponentHost>>>
-      serviceComponentHosts;
+    serviceComponentHosts;
 
   /**
    * [ HostName -> [ ... ] ]
    */
   private Map<String, List<ServiceComponentHost>>
-      serviceComponentHostsByHost;
+    serviceComponentHostsByHost;
 
   /**
    * Map of existing config groups
    */
   private Map<Long, ConfigGroup> clusterConfigGroups;
 
+  /**
+   * Map of Request schedules for this cluster
+   */
+  private Map<Long, RequestExecution> requestExecutions;
+
   private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private Lock readLock = readWriteLock.readLock();
   private Lock writeLock = readWriteLock.writeLock();
@@ -128,6 +137,8 @@ public class ClusterImpl implements Cluster {
   private ConfigGroupFactory configGroupFactory;
   @Inject
   private ConfigGroupHostMappingDAO configGroupHostMappingDAO;
+  @Inject
+  private RequestExecutionFactory requestExecutionFactory;
 
   private volatile boolean svcHostsLoaded = false;
 
@@ -138,11 +149,11 @@ public class ClusterImpl implements Cluster {
     this.clusterEntity = clusterEntity;
 
     this.serviceComponentHosts = new HashMap<String,
-        Map<String, Map<String, ServiceComponentHost>>>();
+      Map<String, Map<String, ServiceComponentHost>>>();
     this.serviceComponentHostsByHost = new HashMap<String,
-        List<ServiceComponentHost>>();
+      List<ServiceComponentHost>>();
     this.desiredStackVersion = gson.fromJson(
-        clusterEntity.getDesiredStackVersion(), StackId.class);
+      clusterEntity.getDesiredStackVersion(), StackId.class);
     allConfigs = new HashMap<String, Map<String, Config>>();
     if (!clusterEntity.getClusterConfigEntities().isEmpty()) {
       for (ClusterConfigEntity entity : clusterEntity.getClusterConfigEntities()) {
@@ -183,32 +194,32 @@ public class ClusterImpl implements Cluster {
             Service service = serviceKV.getValue();
             if (!serviceComponentHosts.containsKey(service.getName())) {
               serviceComponentHosts.put(service.getName(), new HashMap<String,
-                  Map<String, ServiceComponentHost>>());
+                Map<String, ServiceComponentHost>>());
             }
             for (Entry<String, ServiceComponent> svcComponent :
-                service.getServiceComponents().entrySet()) {
+              service.getServiceComponents().entrySet()) {
               ServiceComponent comp = svcComponent.getValue();
               String componentName = svcComponent.getKey();
               if (!serviceComponentHosts.get(service.getName()).containsKey(componentName)) {
                 serviceComponentHosts.get(service.getName()).put(componentName,
-                    new HashMap<String, ServiceComponentHost>());
+                  new HashMap<String, ServiceComponentHost>());
               }
               /** Get Service Host Components **/
               for (Entry<String, ServiceComponentHost> svchost :
-                  comp.getServiceComponentHosts().entrySet()) {
+                comp.getServiceComponentHosts().entrySet()) {
                 String hostname = svchost.getKey();
                 ServiceComponentHost svcHostComponent = svchost.getValue();
                 if (!serviceComponentHostsByHost.containsKey(hostname)) {
                   serviceComponentHostsByHost.put(hostname,
-                      new ArrayList<ServiceComponentHost>());
+                    new ArrayList<ServiceComponentHost>());
                 }
                 List<ServiceComponentHost> compList = serviceComponentHostsByHost.get(hostname);
                 compList.add(svcHostComponent);
 
                 if (!serviceComponentHosts.get(service.getName()).get(componentName)
-                    .containsKey(hostname)) {
+                  .containsKey(hostname)) {
                   serviceComponentHosts.get(service.getName()).get(componentName)
-                      .put(hostname, svcHostComponent);
+                    .put(hostname, svcHostComponent);
                 }
               }
             }
@@ -275,6 +286,31 @@ public class ClusterImpl implements Cluster {
     }
   }
 
+  private void loadRequestExecutions() {
+    if (requestExecutions == null) {
+      clusterGlobalLock.writeLock().lock();
+      try {
+        writeLock.lock();
+        try {
+          if (requestExecutions == null) {
+            requestExecutions = new HashMap<Long, RequestExecution>();
+            if (!clusterEntity.getRequestScheduleEntities().isEmpty()) {
+              for (RequestScheduleEntity scheduleEntity : clusterEntity
+                  .getRequestScheduleEntities()) {
+                requestExecutions.put(scheduleEntity.getScheduleId(),
+                  requestExecutionFactory.createExisting(this, scheduleEntity));
+              }
+            }
+          }
+        } finally {
+          writeLock.unlock();
+        }
+      } finally {
+        clusterGlobalLock.writeLock().unlock();
+      }
+    }
+  }
+
   @Override
   public void addConfigGroup(ConfigGroup configGroup) throws AmbariException {
     loadConfigGroups();
@@ -353,6 +389,77 @@ public class ClusterImpl implements Cluster {
     }
   }
 
+  @Override
+  public void addRequestExecution(RequestExecution requestExecution) throws AmbariException {
+    loadRequestExecutions();
+    clusterGlobalLock.writeLock().lock();
+    try {
+      writeLock.lock();
+      try {
+        LOG.info("Adding a new request schedule"
+            + ", clusterName = " + getClusterName()
+            + ", id = " + requestExecution.getId()
+            + ", description = " + requestExecution.getDescription());
+
+        if (requestExecutions.containsKey(requestExecution.getId())) {
+          LOG.debug("Request schedule already exists"
+            + ", clusterName = " + getClusterName()
+            + ", id = " + requestExecution.getId()
+            + ", description = " + requestExecution.getDescription());
+        } else {
+          requestExecutions.put(requestExecution.getId(), requestExecution);
+        }
+      } finally {
+        writeLock.unlock();
+      }
+    } finally {
+      clusterGlobalLock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public Map<Long, RequestExecution> getAllRequestExecutions() {
+    loadRequestExecutions();
+    clusterGlobalLock.readLock().lock();
+    try {
+      readLock.lock();
+      try {
+        return Collections.unmodifiableMap(requestExecutions);
+      } finally {
+        readLock.unlock();
+      }
+    } finally {
+      clusterGlobalLock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void deleteRequestExecution(Long id) throws AmbariException {
+    loadRequestExecutions();
+    clusterGlobalLock.writeLock().lock();
+    try {
+      readWriteLock.writeLock().lock();
+      try {
+        RequestExecution requestExecution = requestExecutions.get(id);
+        if (requestExecution == null) {
+          throw new AmbariException("Request schedule does not exists, " +
+            "id = " + id);
+        }
+        LOG.info("Deleting request schedule"
+          + ", clusterName = " + getClusterName()
+          + ", id = " + requestExecution.getId()
+          + ", description = " + requestExecution.getDescription());
+
+        requestExecution.delete();
+        requestExecutions.remove(id);
+      } finally {
+        readWriteLock.writeLock().unlock();
+      }
+    } finally {
+      clusterGlobalLock.writeLock().unlock();
+    }
+  }
+
   @Override
   public void deleteConfigGroup(Long id) throws AmbariException {
     loadConfigGroups();
@@ -382,22 +489,22 @@ public class ClusterImpl implements Cluster {
   }
 
   public ServiceComponentHost getServiceComponentHost(String serviceName,
-      String serviceComponentName, String hostname) throws AmbariException {
+                                                      String serviceComponentName, String hostname) throws AmbariException {
     loadServiceHostComponents();
     clusterGlobalLock.readLock().lock();
     try {
       readLock.lock();
       try {
         if (!serviceComponentHosts.containsKey(serviceName)
-            || !serviceComponentHosts.get(serviceName)
-            .containsKey(serviceComponentName)
-            || !serviceComponentHosts.get(serviceName).get(serviceComponentName)
-            .containsKey(hostname)) {
+          || !serviceComponentHosts.get(serviceName)
+          .containsKey(serviceComponentName)
+          || !serviceComponentHosts.get(serviceName).get(serviceComponentName)
+          .containsKey(hostname)) {
           throw new ServiceComponentHostNotFoundException(getClusterName(), serviceName,
-              serviceComponentName, hostname);
+            serviceComponentName, hostname);
         }
         return serviceComponentHosts.get(serviceName).get(serviceComponentName)
-            .get(hostname);
+          .get(hostname);
       } finally {
         readLock.unlock();
       }
@@ -443,7 +550,7 @@ public class ClusterImpl implements Cluster {
   }
 
   public void addServiceComponentHost(
-      ServiceComponentHost svcCompHost) throws AmbariException {
+    ServiceComponentHost svcCompHost) throws AmbariException {
     loadServiceHostComponents();
     clusterGlobalLock.writeLock().lock();
     try {
@@ -451,9 +558,9 @@ public class ClusterImpl implements Cluster {
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Trying to add ServiceComponentHost to ClusterHostMap cache"
-              + ", serviceName=" + svcCompHost.getServiceName()
-              + ", componentName=" + svcCompHost.getServiceComponentName()
-              + ", hostname=" + svcCompHost.getHostName());
+            + ", serviceName=" + svcCompHost.getServiceName()
+            + ", componentName=" + svcCompHost.getServiceComponentName()
+            + ", hostname=" + svcCompHost.getHostName());
         }
 
         final String hostname = svcCompHost.getHostName();
@@ -471,44 +578,44 @@ public class ClusterImpl implements Cluster {
         }
         if (!clusterFound) {
           throw new AmbariException("Host does not belong this cluster"
-              + ", hostname=" + hostname
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId());
+            + ", hostname=" + hostname
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId());
         }
 
         if (!serviceComponentHosts.containsKey(serviceName)) {
           serviceComponentHosts.put(serviceName,
-              new HashMap<String, Map<String, ServiceComponentHost>>());
+            new HashMap<String, Map<String, ServiceComponentHost>>());
         }
         if (!serviceComponentHosts.get(serviceName).containsKey(componentName)) {
           serviceComponentHosts.get(serviceName).put(componentName,
-              new HashMap<String, ServiceComponentHost>());
+            new HashMap<String, ServiceComponentHost>());
         }
 
         if (serviceComponentHosts.get(serviceName).get(componentName).
-            containsKey(hostname)) {
+          containsKey(hostname)) {
           throw new AmbariException("Duplicate entry for ServiceComponentHost"
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
 
         if (!serviceComponentHostsByHost.containsKey(hostname)) {
           serviceComponentHostsByHost.put(hostname,
-              new ArrayList<ServiceComponentHost>());
+            new ArrayList<ServiceComponentHost>());
         }
 
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding a new ServiceComponentHost"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
 
         serviceComponentHosts.get(serviceName).get(componentName).put(hostname,
-            svcCompHost);
+          svcCompHost);
         serviceComponentHostsByHost.get(hostname).add(svcCompHost);
       } finally {
         writeLock.unlock();
@@ -521,7 +628,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public void removeServiceComponentHost(ServiceComponentHost svcCompHost)
-      throws AmbariException {
+    throws AmbariException {
     loadServiceHostComponents();
     clusterGlobalLock.writeLock().lock();
     try {
@@ -529,9 +636,9 @@ public class ClusterImpl implements Cluster {
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Trying to remove ServiceComponentHost to ClusterHostMap cache"
-              + ", serviceName=" + svcCompHost.getServiceName()
-              + ", componentName=" + svcCompHost.getServiceComponentName()
-              + ", hostname=" + svcCompHost.getHostName());
+            + ", serviceName=" + svcCompHost.getServiceName()
+            + ", componentName=" + svcCompHost.getServiceComponentName()
+            + ", hostname=" + svcCompHost.getHostName());
         }
 
         final String hostname = svcCompHost.getHostName();
@@ -549,32 +656,32 @@ public class ClusterImpl implements Cluster {
         }
         if (!clusterFound) {
           throw new AmbariException("Host does not belong this cluster"
-              + ", hostname=" + hostname
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId());
+            + ", hostname=" + hostname
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId());
         }
 
         if (!serviceComponentHosts.containsKey(serviceName)
-            || !serviceComponentHosts.get(serviceName).containsKey(componentName)
-            || !serviceComponentHosts.get(serviceName).get(componentName).
-            containsKey(hostname)) {
+          || !serviceComponentHosts.get(serviceName).containsKey(componentName)
+          || !serviceComponentHosts.get(serviceName).get(componentName).
+          containsKey(hostname)) {
           throw new AmbariException("Invalid entry for ServiceComponentHost"
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
         if (!serviceComponentHostsByHost.containsKey(hostname)) {
           throw new AmbariException("Invalid host entry for ServiceComponentHost"
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
 
         ServiceComponentHost schToRemove = null;
         for (ServiceComponentHost sch : serviceComponentHostsByHost.get(hostname)) {
           if (sch.getServiceName().equals(serviceName)
-              && sch.getServiceComponentName().equals(componentName)
-              && sch.getHostName().equals(hostname)) {
+            && sch.getServiceComponentName().equals(componentName)
+            && sch.getHostName().equals(hostname)) {
             schToRemove = sch;
             break;
           }
@@ -582,18 +689,18 @@ public class ClusterImpl implements Cluster {
 
         if (schToRemove == null) {
           LOG.warn("Unavailable in per host cache. ServiceComponentHost"
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
 
         if (LOG.isDebugEnabled()) {
           LOG.debug("Removing a ServiceComponentHost"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + serviceName
-              + ", serviceComponentName" + componentName
-              + ", hostname= " + hostname);
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + serviceName
+            + ", serviceComponentName" + componentName
+            + ", hostname= " + hostname);
         }
 
         serviceComponentHosts.get(serviceName).get(componentName).remove(hostname);
@@ -627,7 +734,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public List<ServiceComponentHost> getServiceComponentHosts(
-      String hostname) {
+    String hostname) {
     loadServiceHostComponents();
     clusterGlobalLock.readLock().lock();
     try {
@@ -648,7 +755,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public void addService(Service service)
-      throws AmbariException {
+    throws AmbariException {
     loadServices();
     clusterGlobalLock.writeLock().lock();
     try {
@@ -656,15 +763,15 @@ public class ClusterImpl implements Cluster {
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding a new Service"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + service.getName());
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + service.getName());
         }
         if (services.containsKey(service.getName())) {
           throw new AmbariException("Service already exists"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + service.getName());
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + service.getName());
         }
         this.services.put(service.getName(), service);
       } finally {
@@ -677,7 +784,7 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
-  public Service addService(String serviceName) throws AmbariException{
+  public Service addService(String serviceName) throws AmbariException {
     loadServices();
     clusterGlobalLock.writeLock().lock();
     try {
@@ -685,15 +792,15 @@ public class ClusterImpl implements Cluster {
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding a new Service"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + serviceName);
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + serviceName);
         }
         if (services.containsKey(serviceName)) {
           throw new AmbariException("Service already exists"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", serviceName=" + serviceName);
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", serviceName=" + serviceName);
         }
         Service s = serviceFactory.createNew(this, serviceName);
         this.services.put(s.getName(), s);
@@ -709,7 +816,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public Service getService(String serviceName)
-      throws AmbariException {
+    throws AmbariException {
     loadServices();
     clusterGlobalLock.readLock().lock();
     try {
@@ -769,10 +876,10 @@ public class ClusterImpl implements Cluster {
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Changing DesiredStackVersion of Cluster"
-              + ", clusterName=" + getClusterName()
-              + ", clusterId=" + getClusterId()
-              + ", currentDesiredStackVersion=" + this.desiredStackVersion
-              + ", newDesiredStackVersion=" + stackVersion);
+            + ", clusterName=" + getClusterName()
+            + ", clusterId=" + getClusterId()
+            + ", currentDesiredStackVersion=" + this.desiredStackVersion
+            + ", newDesiredStackVersion=" + stackVersion);
         }
         this.desiredStackVersion = stackVersion;
         clusterEntity.setDesiredStackVersion(gson.toJson(stackVersion));
@@ -812,7 +919,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public void setCurrentStackVersion(StackId stackVersion)
-  throws AmbariException {
+    throws AmbariException {
     clusterGlobalLock.readLock().lock();
     try {
       writeLock.lock();
@@ -835,8 +942,8 @@ public class ClusterImpl implements Cluster {
       } catch (RollbackException e) {
         LOG.warn("Unable to set version " + stackVersion + " for cluster " + getClusterName());
         throw new AmbariException("Unable to set"
-            + " version=" + stackVersion
-            + " for cluster " + getClusterName(), e);
+          + " version=" + stackVersion
+          + " for cluster " + getClusterName(), e);
       } finally {
         writeLock.unlock();
       }
@@ -872,7 +979,7 @@ public class ClusterImpl implements Cluster {
       readWriteLock.readLock().lock();
       try {
         if (!allConfigs.containsKey(configType)
-            || !allConfigs.get(configType).containsKey(versionTag)) {
+          || !allConfigs.get(configType).containsKey(versionTag)) {
           return null;
         }
         return allConfigs.get(configType).get(versionTag);
@@ -892,9 +999,9 @@ public class ClusterImpl implements Cluster {
       readWriteLock.writeLock().lock();
       try {
         if (config.getType() == null
-            || config.getType().isEmpty()
-            || config.getVersionTag() == null
-            || config.getVersionTag().isEmpty()) {
+          || config.getType().isEmpty()
+          || config.getVersionTag() == null
+          || config.getVersionTag().isEmpty()) {
           // TODO throw error
         }
         if (!allConfigs.containsKey(config.getType())) {
@@ -935,14 +1042,14 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public ClusterResponse convertToResponse()
-      throws AmbariException {
+    throws AmbariException {
     clusterGlobalLock.readLock().lock();
     try {
       readWriteLock.readLock().lock();
       try {
         ClusterResponse r = new ClusterResponse(getClusterId(), getClusterName(),
-            clusters.getHostsForCluster(getClusterName()).keySet(),
-            getDesiredStackVersion().getStackId());
+          clusters.getHostsForCluster(getClusterName()).keySet(),
+          getDesiredStackVersion().getStackId());
 
         return r;
       } finally {
@@ -962,9 +1069,9 @@ public class ClusterImpl implements Cluster {
       readWriteLock.readLock().lock();
       try {
         sb.append("Cluster={ clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", desiredStackVersion=" + desiredStackVersion.getStackId()
-            + ", services=[ ");
+          + ", clusterId=" + getClusterId()
+          + ", desiredStackVersion=" + desiredStackVersion.getStackId()
+          + ", services=[ ");
         boolean first = true;
         for (Service s : services.values()) {
           if (!first) {
@@ -1012,13 +1119,13 @@ public class ClusterImpl implements Cluster {
       readWriteLock.writeLock().lock();
       try {
         LOG.info("Deleting all services for cluster"
-            + ", clusterName=" + getClusterName());
+          + ", clusterName=" + getClusterName());
         for (Service service : services.values()) {
           if (!service.canBeRemoved()) {
             throw new AmbariException("Found non removable service when trying to"
-                + " all services from cluster"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + service.getName());
+              + " all services from cluster"
+              + ", clusterName=" + getClusterName()
+              + ", serviceName=" + service.getName());
           }
         }
 
@@ -1038,7 +1145,7 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public void deleteService(String serviceName)
-      throws AmbariException {
+    throws AmbariException {
     loadServices();
     clusterGlobalLock.writeLock().lock();
     try {
@@ -1046,13 +1153,13 @@ public class ClusterImpl implements Cluster {
       try {
         Service service = getService(serviceName);
         LOG.info("Deleting service for cluster"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + service.getName());
+          + ", clusterName=" + getClusterName()
+          + ", serviceName=" + service.getName());
         // FIXME check dependencies from meta layer
         if (!service.canBeRemoved()) {
           throw new AmbariException("Could not delete service from cluster"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + service.getName());
+            + ", clusterName=" + getClusterName()
+            + ", serviceName=" + service.getName());
         }
         service.delete();
         services.remove(serviceName);
@@ -1077,8 +1184,8 @@ public class ClusterImpl implements Cluster {
           if (!service.canBeRemoved()) {
             safeToRemove = false;
             LOG.warn("Found non removable service"
-                + ", clusterName=" + getClusterName()
-                + ", serviceName=" + service.getName());
+              + ", clusterName=" + getClusterName()
+              + ", serviceName=" + service.getName());
           }
         }
         return safeToRemove;
@@ -1162,7 +1269,7 @@ public class ClusterImpl implements Cluster {
 
 
   }
-  
+
   @Override
   public Map<String, DesiredConfig> getDesiredConfigs() {
     clusterGlobalLock.readLock().lock();
@@ -1186,13 +1293,13 @@ public class ClusterImpl implements Cluster {
 
         if (!map.isEmpty()) {
           Map<String, List<HostConfigMappingEntity>> hostMappingsByType =
-              hostConfigMappingDAO.findSelectedHostsByTypes(clusterEntity.getClusterId(), types);
+            hostConfigMappingDAO.findSelectedHostsByTypes(clusterEntity.getClusterId(), types);
 
           for (Entry<String, DesiredConfig> entry : map.entrySet()) {
             List<DesiredConfig.HostOverride> hostOverrides = new ArrayList<DesiredConfig.HostOverride>();
             for (HostConfigMappingEntity mappingEntity : hostMappingsByType.get(entry.getKey())) {
               hostOverrides.add(new DesiredConfig.HostOverride(mappingEntity.getHostName(),
-                  mappingEntity.getVersion()));
+                mappingEntity.getVersion()));
             }
             entry.getValue().setHostOverrides(hostOverrides);
           }
@@ -1239,7 +1346,7 @@ public class ClusterImpl implements Cluster {
     }
 
     List<HostConfigMappingEntity> mappingEntities =
-        hostConfigMappingDAO.findSelectedByHosts(clusterEntity.getClusterId(), hostnames);
+      hostConfigMappingDAO.findSelectedByHosts(clusterEntity.getClusterId(), hostnames);
 
     Map<String, Map<String, DesiredConfig>> desiredConfigsByHost = new HashMap<String, Map<String, DesiredConfig>>();
 

+ 0 - 5
ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java

@@ -26,9 +26,7 @@ import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.controller.ConfigGroupResponse;
-import org.apache.ambari.server.controller.internal.ConfigGroupResourceProvider;
 import org.apache.ambari.server.controller.internal.ConfigurationResourceProvider;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupDAO;
@@ -47,14 +45,11 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
-import org.eclipse.persistence.sessions.UnitOfWork;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;

+ 46 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/Batch.java

@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class Batch {
+  private final List<BatchRequest> batchRequests = new ArrayList<BatchRequest>();
+  private BatchSettings batchSettings;
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("batch_requests")
+  public List<BatchRequest> getBatchRequests() {
+    return batchRequests;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("batch_settings")
+  public BatchSettings getBatchSettings() {
+    return batchSettings;
+  }
+
+  public void setBatchSettings(BatchSettings batchSettings) {
+    this.batchSettings = batchSettings;
+  }
+
+}
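
The Jackson annotations above define the JSON shape of a batch. A minimal
serialization sketch, assuming Jackson 1.x's ObjectMapper and an illustrative
request URI (neither is part of this patch):

import org.codehaus.jackson.map.ObjectMapper;

import org.apache.ambari.server.state.scheduler.Batch;
import org.apache.ambari.server.state.scheduler.BatchRequest;
import org.apache.ambari.server.state.scheduler.BatchSettings;

public class BatchJsonSketch {
  public static void main(String[] args) throws Exception {
    Batch batch = new Batch();

    BatchRequest request = new BatchRequest();
    request.setOrderId(1L);
    request.setType(BatchRequest.Type.POST);
    request.setUri("/api/v1/clusters/c1/requests"); // illustrative URI
    batch.getBatchRequests().add(request);

    BatchSettings settings = new BatchSettings();
    settings.setBatchSeparationInMinutes(15);
    settings.setTaskFailureToleranceLimit(10);
    batch.setBatchSettings(settings);

    // NON_EMPTY inclusion drops the unset fields (body, status, etc.), so the
    // output resembles:
    // {"batch_requests":[{"order_id":1,"request_type":"POST",
    //   "request_uri":"/api/v1/clusters/c1/requests"}],
    //  "batch_settings":{"batch_separation_in_minutes":15,
    //   "task_failure_tolerance_limit":10}}
    System.out.println(new ObjectMapper().writeValueAsString(batch));
  }
}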

+ 109 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchRequest.java

@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+public class BatchRequest implements Comparable<BatchRequest> {
+  private Long orderId;
+  private Type type;
+  private String uri;
+  private String body;
+  private String status;
+  private Integer returnCode;
+  private String responseMsg;
+
+  @JsonProperty("order_id")
+  public Long getOrderId() {
+    return orderId;
+  }
+
+  public void setOrderId(Long orderId) {
+    this.orderId = orderId;
+  }
+
+  @JsonProperty("request_type")
+  public String getType() {
+    return type.name();
+  }
+
+  public void setType(Type type) {
+    this.type = type;
+  }
+
+  @JsonProperty("request_uri")
+  public String getUri() {
+    return uri;
+  }
+
+  public void setUri(String uri) {
+    this.uri = uri;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("request_body")
+  public String getBody() {
+    return body;
+  }
+
+  public void setBody(String body) {
+    this.body = body;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("request_status")
+  public String getStatus() {
+    return status;
+  }
+
+  public void setStatus(String status) {
+    this.status = status;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("return_code")
+  public Integer getReturnCode() {
+    return returnCode;
+  }
+
+  public void setReturnCode(Integer returnCode) {
+    this.returnCode = returnCode;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("response_message")
+  public String getResponseMsg() {
+    return responseMsg;
+  }
+
+  public void setResponseMsg(String responseMsg) {
+    this.responseMsg = responseMsg;
+  }
+
+  @Override
+  public int compareTo(BatchRequest batchRequest) {
+    return this.orderId.compareTo(batchRequest.getOrderId());
+  }
+
+  public enum Type {
+    PUT,
+    POST,
+    DELETE
+  }
+}
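
compareTo() above orders batch requests by order_id; RequestExecutionImpl,
further down in this patch, sorts requests with Collections.sort() before
assigning sequential batch ids. A small sketch of that contract:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.ambari.server.state.scheduler.BatchRequest;

public class BatchOrderSketch {
  public static void main(String[] args) {
    List<BatchRequest> requests = new ArrayList<BatchRequest>();

    BatchRequest second = new BatchRequest();
    second.setOrderId(20L);
    requests.add(second);

    BatchRequest first = new BatchRequest();
    first.setOrderId(10L);
    requests.add(first);

    // Sorting by order_id restores the user-supplied execution order.
    Collections.sort(requests);
    System.out.println(requests.get(0).getOrderId()); // prints 10
  }
}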

+ 34 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchRequestJob.java

@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.scheduler.AbstractLinearExecutionJob;
+import org.apache.ambari.server.scheduler.ExecutionScheduleManager;
+
+public class BatchRequestJob extends AbstractLinearExecutionJob {
+
+  public BatchRequestJob(ExecutionScheduleManager executionScheduleManager) {
+    super(executionScheduleManager);
+  }
+
+  @Override
+  protected void doWork() throws AmbariException {
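+    // No-op in this patch: BatchRequestJob introduces the job type, but the
+    // per-request execution logic is not implemented here.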
+
+  }
+}

+ 47 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/BatchSettings.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+public class BatchSettings {
+  private Integer batchSeparationInMinutes;
+  private Integer taskFailureTolerance;
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("batch_separation_in_minutes")
+  public Integer getBatchSeparationInMinutes() {
+    return batchSeparationInMinutes;
+  }
+
+  public void setBatchSeparationInMinutes(Integer batchSeparationInMinutes) {
+    this.batchSeparationInMinutes = batchSeparationInMinutes;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("task_failure_tolerance_limit")
+  public Integer getTaskFailureToleranceLimit() {
+    return taskFailureTolerance;
+  }
+
+  public void setTaskFailureToleranceLimit(Integer taskFailureTolerance) {
+    this.taskFailureTolerance = taskFailureTolerance;
+  }
+
+}

+ 145 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java

@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.apache.ambari.server.controller.RequestScheduleResponse;
+
+/**
+ * Request Execution is a type of resource that supports scheduling a request
+ * or a group of requests for execution by the ActionManager.
+ */
+public interface RequestExecution {
+  /**
+   * Primary key of Request Execution
+   * @return
+   */
+  public Long getId();
+
+  /**
+   * Cluster name to which request schedule belongs
+   * @return
+   */
+  public String getClusterName();
+
+  /**
+   * Get the batch of requests along with batch settings
+   * @return
+   */
+  public Batch getBatch();
+
+  /**
+   * Set batch of requests and batch settings
+   */
+  public void setBatch(Batch batch);
+
+  /**
+   * Get schedule for the execution
+   * @return
+   */
+  public Schedule getSchedule();
+
+  /**
+   * Set schedule for the execution
+   */
+  public void setSchedule(Schedule schedule);
+
+  /**
+   * Get @RequestScheduleResponse for this Request Execution
+   * @return
+   */
+  public RequestScheduleResponse convertToResponse();
+
+  /**
+   * Persist the Request Execution and schedule
+   */
+  public void persist();
+
+  /**
+   * Refresh entity from DB.
+   */
+  public void refresh();
+
+  /**
+   * Delete Request Schedule entity
+   */
+  public void delete();
+
+  /**
+   * Get status of schedule
+   */
+  public String getStatus();
+
+  /**
+   * Set request execution description
+   */
+  public void setDescription(String description);
+
+  /**
+   * Get description of the request execution
+   */
+  public String getDescription();
+
+  /**
+   * Set status of the schedule
+   */
+  public void setStatus(Status status);
+
+  /**
+   * Set datetime:status of last request that was executed
+   */
+  public void setLastExecutionStatus(String status);
+
+  /**
+   * Set create username
+   */
+  public void setCreateUser(String username);
+
+  /**
+   * Set update username
+   */
+  public void setUpdateUser(String username);
+
+  /**
+   * Get created time
+   */
+  public String getCreateTime();
+
+  /**
+   * Get updated time
+   */
+  public String getUpdateTime();
+
+  /**
+   * Get create user
+   */
+  public String getCreateUser();
+
+  /**
+   * Get update user
+   */
+  public String getUpdateUser();
+
+  /**
+   * Status of the Request execution
+   */
+  public enum Status {
+    SCHEDULED,
+    COMPLETED,
+    DISABLED
+  }
+}

+ 31 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecutionFactory.java

@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import com.google.inject.assistedinject.Assisted;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import org.apache.ambari.server.state.Cluster;
+
+public interface RequestExecutionFactory {
+  RequestExecution createNew(@Assisted("cluster") Cluster cluster,
+                             @Assisted("batch") Batch batch,
+                             @Assisted("schedule") Schedule schedule);
+
+  RequestExecution createExisting(Cluster cluster,
+                                  RequestScheduleEntity requestScheduleEntity);
+}
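
Factories with @Assisted parameters like this one are typically generated by
Guice rather than written by hand. A hedged binding sketch; the module class
below is hypothetical, and the patch performs the real binding elsewhere:

import com.google.inject.AbstractModule;
import com.google.inject.assistedinject.FactoryModuleBuilder;

import org.apache.ambari.server.state.scheduler.RequestExecution;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;

// Hypothetical module, for illustration only.
public class RequestExecutionModule extends AbstractModule {
  @Override
  protected void configure() {
    // Guice generates a RequestExecutionFactory whose createNew() and
    // createExisting() arguments fill the matching @Assisted constructor
    // parameters of RequestExecutionImpl.
    install(new FactoryModuleBuilder()
        .implement(RequestExecution.class, RequestExecutionImpl.class)
        .build(RequestExecutionFactory.class));
  }
}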

+ 373 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecutionImpl.java

@@ -0,0 +1,373 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.controller.RequestScheduleResponse;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.dao.RequestScheduleBatchRequestDAO;
+import org.apache.ambari.server.orm.dao.RequestScheduleDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.utils.DateUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+public class RequestExecutionImpl implements RequestExecution {
+  private Cluster cluster;
+  private Batch batch;
+  private Schedule schedule;
+  private RequestScheduleEntity requestScheduleEntity;
+  private volatile boolean isPersisted = false;
+
+  @Inject
+  private Gson gson;
+  @Inject
+  private Clusters clusters;
+  @Inject
+  private RequestScheduleDAO requestScheduleDAO;
+  @Inject
+  private RequestScheduleBatchRequestDAO batchRequestDAO;
+  @Inject
+  private ClusterDAO clusterDAO;
+  @Inject
+  private HostDAO hostDAO;
+
+  private static final Logger LOG = LoggerFactory.getLogger(RequestExecutionImpl.class);
+  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+
+  @AssistedInject
+  public RequestExecutionImpl(@Assisted("cluster") Cluster cluster,
+                              @Assisted("batch") Batch batch,
+                              @Assisted("schedule") Schedule schedule,
+                              Injector injector) {
+    this.cluster = cluster;
+    this.batch = batch;
+    this.schedule = schedule;
+    injector.injectMembers(this);
+
+    // Initialize the Entity object
+    // Batch Hosts is initialized on persist
+    requestScheduleEntity = new RequestScheduleEntity();
+    requestScheduleEntity.setClusterId(cluster.getClusterId());
+
+    updateBatchSettings();
+
+    updateSchedule();
+  }
+
+  @AssistedInject
+  public RequestExecutionImpl(@Assisted Cluster cluster,
+                              @Assisted RequestScheduleEntity requestScheduleEntity,
+                              Injector injector) {
+    this.cluster = cluster;
+    injector.injectMembers(this);
+
+    this.requestScheduleEntity = requestScheduleEntity;
+
+    batch = new Batch();
+    schedule = new Schedule();
+
+    BatchSettings batchSettings = new BatchSettings();
+    batchSettings.setBatchSeparationInMinutes(requestScheduleEntity.getBatchSeparationInMinutes());
+    batchSettings.setTaskFailureToleranceLimit(requestScheduleEntity.getBatchTolerationLimit());
+
+    batch.setBatchSettings(batchSettings);
+
+    Collection<RequestScheduleBatchRequestEntity> batchRequestEntities =
+      requestScheduleEntity.getRequestScheduleBatchRequestEntities();
+    if (batchRequestEntities != null) {
+      for (RequestScheduleBatchRequestEntity batchRequestEntity :
+          batchRequestEntities) {
+        BatchRequest batchRequest = new BatchRequest();
+        batchRequest.setType(BatchRequest.Type.valueOf(batchRequestEntity.getRequestType()));
+        batchRequest.setUri(batchRequestEntity.getRequestUri());
+        batchRequest.setBody(batchRequestEntity.getRequestBody());
+        batchRequest.setStatus(batchRequestEntity.getRequestStatus());
+        batchRequest.setReturnCode(batchRequestEntity.getReturnCode());
+        batchRequest.setResponseMsg(batchRequestEntity.getReturnMessage());
+        batch.getBatchRequests().add(batchRequest);
+      }
+    }
+
+    schedule.setDayOfWeek(requestScheduleEntity.getDayOfWeek());
+    schedule.setDaysOfMonth(requestScheduleEntity.getDaysOfMonth());
+    schedule.setMinutes(requestScheduleEntity.getMinutes());
+    schedule.setHours(requestScheduleEntity.getHours());
+    schedule.setMonth(requestScheduleEntity.getMonth());
+    schedule.setYear(requestScheduleEntity.getYear());
+    schedule.setStartTime(requestScheduleEntity.getStartTime());
+    schedule.setEndTime(requestScheduleEntity.getEndTime());
+
+    isPersisted = true;
+  }
+
+  @Override
+  public Long getId() {
+    return requestScheduleEntity.getScheduleId();
+  }
+
+  @Override
+  public String getClusterName() {
+    return cluster.getClusterName();
+  }
+
+  @Override
+  public Batch getBatch() {
+    return batch;
+  }
+
+  @Override
+  public void setBatch(Batch batch) {
+    this.batch = batch;
+  }
+
+  @Override
+  public Schedule getSchedule() {
+    return schedule;
+  }
+
+  @Override
+  public void setSchedule(Schedule schedule) {
+    this.schedule = schedule;
+  }
+
+  @Override
+  public RequestScheduleResponse convertToResponse() {
+    readWriteLock.readLock().lock();
+    try {
+      RequestScheduleResponse response = new RequestScheduleResponse(
+        getId(), getClusterName(), getDescription(), getStatus(), getBatch(),
+        getSchedule(), requestScheduleEntity.getCreateUser(),
+        DateUtils.convertToReadableTime(requestScheduleEntity.getCreateTimestamp()),
+        requestScheduleEntity.getUpdateUser(),
+        DateUtils.convertToReadableTime(requestScheduleEntity.getUpdateTimestamp())
+      );
+      return response;
+    } finally {
+      readWriteLock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void persist() {
+    readWriteLock.writeLock().lock();
+    try {
+      if (!isPersisted) {
+        persistEntities();
+        refresh();
+        cluster.refresh();
+        isPersisted = true;
+      } else {
+        saveIfPersisted();
+      }
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void refresh() {
+    readWriteLock.writeLock().lock();
+    try {
+      if (isPersisted) {
+        RequestScheduleEntity scheduleEntity = requestScheduleDAO.findById
+          (requestScheduleEntity.getScheduleId());
+        requestScheduleDAO.refresh(scheduleEntity);
+      }
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void delete() {
+    readWriteLock.writeLock().lock();
+    try {
+      if (isPersisted) {
+        batchRequestDAO.removeByScheduleId(requestScheduleEntity.getScheduleId());
+        requestScheduleDAO.remove(requestScheduleEntity);
+        cluster.refresh();
+        isPersisted = false;
+      }
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public String getStatus() {
+    return requestScheduleEntity.getStatus();
+  }
+
+  @Override
+  public void setDescription(String description) {
+    requestScheduleEntity.setDescription(description);
+  }
+
+  @Override
+  public String getDescription() {
+    return requestScheduleEntity.getDescription();
+  }
+
+  /**
+   * Persist @RequestScheduleEntity with @RequestScheduleBatchRequestEntity
+   */
+  @Transactional
+  private void persistEntities() {
+    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+    requestScheduleEntity.setClusterEntity(clusterEntity);
+    requestScheduleEntity.setCreateTimestamp(System.currentTimeMillis());
+    requestScheduleEntity.setUpdateTimestamp(System.currentTimeMillis());
+    requestScheduleDAO.create(requestScheduleEntity);
+
+    persistRequestMapping();
+  }
+
+  @Transactional
+  private void persistRequestMapping() {
+    // Delete existing mappings to support updates
+    if (isPersisted) {
+      batchRequestDAO.removeByScheduleId(requestScheduleEntity.getScheduleId());
+      requestScheduleEntity.getRequestScheduleBatchRequestEntities().clear();
+    }
+
+    if (batch != null) {
+      List<BatchRequest> batchRequests = batch.getBatchRequests();
+      if (batchRequests != null) {
+        // Sort by orderId and assign increasing batch id
+        Collections.sort(batchRequests);
+        Long batchId = 1L;
+        for (BatchRequest batchRequest : batchRequests) {
+          RequestScheduleBatchRequestEntity batchRequestEntity = new
+            RequestScheduleBatchRequestEntity();
+          batchRequestEntity.setBatchId(batchId);
+          batchRequestEntity.setScheduleId(requestScheduleEntity.getScheduleId());
+          batchRequestEntity.setRequestScheduleEntity(requestScheduleEntity);
+          batchRequestEntity.setRequestType(batchRequest.getType());
+          batchRequestEntity.setRequestUri(batchRequest.getUri());
+          batchRequestEntity.setRequestBody(batchRequest.getBody());
+          batchRequestEntity.setReturnCode(batchRequest.getReturnCode());
+          batchRequestEntity.setReturnMessage(batchRequest.getResponseMsg());
+          batchRequestEntity.setRequestStatus(batchRequest.getStatus());
+          batchRequestDAO.create(batchRequestEntity);
+          requestScheduleEntity.getRequestScheduleBatchRequestEntities().add
+            (batchRequestEntity);
+          requestScheduleDAO.merge(requestScheduleEntity);
+          batchId++;
+        }
+      }
+    }
+  }
+
+  @Transactional
+  private void saveIfPersisted() {
+    if (isPersisted) {
+      requestScheduleEntity.setUpdateTimestamp(System.currentTimeMillis());
+      // Update the Entity object with new settings
+      updateBatchSettings();
+      updateSchedule();
+      // Persist schedule and settings
+      requestScheduleDAO.merge(requestScheduleEntity);
+      // Persist the batch request mappings
+      persistRequestMapping();
+    }
+  }
+
+  private void updateBatchSettings() {
+    if (batch != null) {
+      BatchSettings settings = batch.getBatchSettings();
+      if (settings != null) {
+        requestScheduleEntity.setBatchSeparationInMinutes(settings.getBatchSeparationInMinutes());
+        requestScheduleEntity.setBatchTolerationLimit(settings.getTaskFailureToleranceLimit());
+      }
+    }
+  }
+
+  private void updateSchedule() {
+    if (schedule != null) {
+      requestScheduleEntity.setMinutes(schedule.getMinutes());
+      requestScheduleEntity.setHours(schedule.getHours());
+      requestScheduleEntity.setDaysOfMonth(schedule.getDaysOfMonth());
+      requestScheduleEntity.setDayOfWeek(schedule.getDayOfWeek());
+      requestScheduleEntity.setMonth(schedule.getMonth());
+      requestScheduleEntity.setYear(schedule.getYear());
+      requestScheduleEntity.setStartTime(schedule.getStartTime());
+      requestScheduleEntity.setEndTime(schedule.getEndTime());
+    }
+  }
+
+  @Override
+  public void setStatus(Status status) {
+    requestScheduleEntity.setStatus(status.name());
+  }
+
+  @Override
+  public void setLastExecutionStatus(String status) {
+    requestScheduleEntity.setLastExecutionStatus(status);
+  }
+
+  @Override
+  public void setCreateUser(String username) {
+    requestScheduleEntity.setCreateUser(username);
+  }
+
+  @Override
+  public void setUpdateUser(String username) {
+    requestScheduleEntity.setUpdateUser(username);
+  }
+
+  @Override
+  public String getCreateTime() {
+    return DateUtils.convertToReadableTime
+      (requestScheduleEntity.getCreateTimestamp());
+  }
+
+  @Override
+  public String getUpdateTime() {
+    return DateUtils.convertToReadableTime
+      (requestScheduleEntity.getUpdateTimestamp());
+  }
+
+  @Override
+  public String getCreateUser() {
+    return requestScheduleEntity.getCreateUser();
+  }
+
+  @Override
+  public String getUpdateUser() {
+    return requestScheduleEntity.getUpdateUser();
+  }
+
+}
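
Putting the pieces together, a hedged usage sketch of the implementation
above. The factory and cluster are assumed to be injected or looked up
elsewhere, and the URI and description strings are illustrative:

import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.scheduler.*;

public class RequestScheduleSketch {

  // "factory" and "cluster" would be injected/obtained in real code.
  static RequestExecution scheduleNightlyBatch(RequestExecutionFactory factory,
                                               Cluster cluster) {
    Batch batch = new Batch();

    BatchRequest request = new BatchRequest();
    request.setOrderId(1L);
    request.setType(BatchRequest.Type.POST);
    request.setUri("/api/v1/clusters/c1/requests"); // illustrative URI
    batch.getBatchRequests().add(request);

    BatchSettings settings = new BatchSettings();
    settings.setBatchSeparationInMinutes(10);
    batch.setBatchSettings(settings);

    Schedule schedule = new Schedule();
    schedule.setMinutes("30");
    schedule.setHours("2");

    // The first persist() inserts the entities; later calls merge updates
    // and rewrite the batch request mapping.
    RequestExecution execution = factory.createNew(cluster, batch, schedule);
    execution.setDescription("Nightly maintenance batch");
    execution.setStatus(RequestExecution.Status.SCHEDULED);
    execution.setCreateUser("admin");
    execution.persist();
    return execution;
  }
}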

+ 177 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/Schedule.java

@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.scheduler;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+public class Schedule {
+  private String minutes;
+  private String hours;
+  private String daysOfMonth;
+  private String month;
+  private String dayOfWeek;
+  private String year;
+  private String startTime;
+  private String endTime;
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("minutes")
+  public String getMinutes() {
+    return minutes;
+  }
+
+  public void setMinutes(String minutes) {
+    this.minutes = minutes;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("hours")
+  public String getHours() {
+    return hours;
+  }
+
+  public void setHours(String hours) {
+    this.hours = hours;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("days_of_month")
+  public String getDaysOfMonth() {
+    return daysOfMonth;
+  }
+
+  public void setDaysOfMonth(String daysOfMonth) {
+    this.daysOfMonth = daysOfMonth;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("month")
+  public String getMonth() {
+    return month;
+  }
+
+  public void setMonth(String month) {
+    this.month = month;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("day_of_week")
+  public String getDayOfWeek() {
+    return dayOfWeek;
+  }
+
+  public void setDayOfWeek(String dayOfWeek) {
+    this.dayOfWeek = dayOfWeek;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("year")
+  public String getYear() {
+    return year;
+  }
+
+  public void setYear(String year) {
+    this.year = year;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("start_time")
+  public String getStartTime() {
+    return startTime;
+  }
+
+  public void setStartTime(String startTime) {
+    this.startTime = startTime;
+  }
+
+  @JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY)
+  @JsonProperty("end_time")
+  public String getEndTime() {
+    return endTime;
+  }
+
+  public void setEndTime(String endTime) {
+    this.endTime = endTime;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    Schedule schedule = (Schedule) o;
+
+    if (dayOfWeek != null ? !dayOfWeek.equals(schedule.dayOfWeek) : schedule.dayOfWeek != null)
+      return false;
+    if (daysOfMonth != null ? !daysOfMonth.equals(schedule.daysOfMonth) : schedule.daysOfMonth != null)
+      return false;
+    if (endTime != null ? !endTime.equals(schedule.endTime) : schedule.endTime != null)
+      return false;
+    if (hours != null ? !hours.equals(schedule.hours) : schedule.hours != null)
+      return false;
+    if (minutes != null ? !minutes.equals(schedule.minutes) : schedule.minutes != null)
+      return false;
+    if (month != null ? !month.equals(schedule.month) : schedule.month != null)
+      return false;
+    if (startTime != null ? !startTime.equals(schedule.startTime) : schedule.startTime != null)
+      return false;
+    if (year != null ? !year.equals(schedule.year) : schedule.year != null)
+      return false;
+
+    return true;
+  }
+
+  public boolean isEmpty() {
+    return (minutes == null || minutes.isEmpty())
+      && (hours == null || hours.isEmpty())
+      && (dayOfWeek == null || dayOfWeek.isEmpty())
+      && (daysOfMonth == null || daysOfMonth.isEmpty())
+      && (month == null || month.isEmpty())
+      && (year == null || year.isEmpty())
+      && (startTime == null || startTime.isEmpty())
+      && (endTime == null || endTime.isEmpty());
+  }
+
+  @Override
+  public int hashCode() {
+    int result = minutes != null ? minutes.hashCode() : 0;
+    result = 31 * result + (hours != null ? hours.hashCode() : 0);
+    result = 31 * result + (daysOfMonth != null ? daysOfMonth.hashCode() : 0);
+    result = 31 * result + (month != null ? month.hashCode() : 0);
+    result = 31 * result + (dayOfWeek != null ? dayOfWeek.hashCode() : 0);
+    result = 31 * result + (year != null ? year.hashCode() : 0);
+    result = 31 * result + (startTime != null ? startTime.hashCode() : 0);
+    result = 31 * result + (endTime != null ? endTime.hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "Schedule{" +
+      "minutes='" + minutes + '\'' +
+      ", hours='" + hours + '\'' +
+      ", days_of_month='" + daysOfMonth + '\'' +
+      ", month='" + month + '\'' +
+      ", day_of_week='" + dayOfWeek + '\'' +
+      ", year='" + year + '\'' +
+      ", startTime='" + startTime + '\'' +
+      ", endTime='" + endTime + '\'' +
+      '}';
+  }
+}
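
The fields above are stored as free-form strings in cron-style positions
(minutes, hours, days of month, month, day of week, year), and this class
performs no validation of them. A minimal sketch of a schedule that fires
daily at 02:30, assuming conventional cron values and a readable time format:

import org.apache.ambari.server.state.scheduler.Schedule;

public class ScheduleSketch {
  public static void main(String[] args) {
    Schedule schedule = new Schedule();
    schedule.setMinutes("30");
    schedule.setHours("2");
    schedule.setDaysOfMonth("*");
    schedule.setMonth("*");
    schedule.setDayOfWeek("?"); // Quartz-style "no specific value"; an assumption here
    schedule.setYear("2014");
    schedule.setStartTime("2014-01-01 00:00:00"); // string format is an assumption
    schedule.setEndTime("2014-12-31 23:59:59");

    // isEmpty() is the all-fields-blank check used to detect a missing schedule.
    System.out.println(schedule.isEmpty()); // false
    System.out.println(schedule);
  }
}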

+ 63 - 0
ambari-server/src/main/java/org/apache/ambari/server/utils/DateUtils.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.utils;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * Static Helper methods for datetime conversions
+ */
+public class DateUtils {
+
+  /**
+   * Milliseconds to readable format in current server timezone
+   * @param timestamp
+   * @return
+   */
+  public static String convertToReadableTime(Long timestamp) {
+    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+    return dateFormat.format(new Date(timestamp));
+  }
+
+  /**
+   * Convert time in given format to milliseconds
+   * @return
+   */
+  public static Long convertToTimestamp(String time, String format) {
+    SimpleDateFormat dateFormat = new SimpleDateFormat(format);
+    try {
+      Date date = dateFormat.parse(time);
+      return date.getTime();
+    } catch (ParseException e) {
+      e.printStackTrace();
+    }
+    return null;
+  }
+
+  /**
+   * Get difference in minutes between old date and now
+   * @param oldTime
+   * @return
+   */
+  public static Long getDateDifferenceInMinutes(Date oldTime) {
+    // Subtract the old time from now so past dates yield a positive value,
+    // and return the full difference in minutes rather than only the
+    // minutes-within-the-hour component.
+    long diff = new Date().getTime() - oldTime.getTime();
+    return diff / (60 * 1000);
+  }
+}
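
A usage sketch for the helpers above; output assumes the server's default
timezone and the corrected difference logic:

import java.util.Date;

import org.apache.ambari.server.utils.DateUtils;

public class DateUtilsSketch {
  public static void main(String[] args) {
    // Round-trip a timestamp string through milliseconds and back.
    Long millis = DateUtils.convertToTimestamp(
        "2014-01-01 02:30:00", "yyyy-MM-dd HH:mm:ss");
    System.out.println(DateUtils.convertToReadableTime(millis));
    // -> 2014-01-01 02:30:00

    // Whole minutes elapsed since a Date one hour in the past.
    Date hourAgo = new Date(System.currentTimeMillis() - 60 * 60 * 1000);
    System.out.println(DateUtils.getDateDifferenceInMinutes(hourAgo)); // ~60
  }
}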

+ 3 - 4
ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql

@@ -52,8 +52,8 @@ CREATE TABLE ambari_sequences (sequence_name VARCHAR(50) NOT NULL, value DECIMAL
 CREATE TABLE confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
 CREATE TABLE configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, PRIMARY KEY(group_id));
 CREATE TABLE configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
-CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, last_execution_status varchar(255), PRIMARY KEY(schedule_id));
-CREATE TABLE ambari.requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
+CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, description varchar(255), status varchar(255), batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+CREATE TABLE ambari.requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body varchar(4000), request_status varchar(255), return_code smallint, return_message varchar(255), PRIMARY KEY(schedule_id, batch_id));
 
 
 ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
@@ -82,8 +82,7 @@ ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconf
 ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
 ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
 ALTER TABLE configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
+ALTER TABLE ambari.requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
 
 
 INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);

+ 3 - 6
ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql

@@ -43,9 +43,8 @@ CREATE TABLE configgroup (group_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, g
 CREATE TABLE confgroupclusterconfigmapping (config_group_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_type VARCHAR2(255) NOT NULL, version_tag VARCHAR2(255) NOT NULL, user_name VARCHAR2(255) DEFAULT '_db', create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
 CREATE TABLE configgrouphostmapping (config_group_id NUMBER(19) NOT NULL, host_name VARCHAR2(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
 CREATE TABLE action (action_name VARCHAR2(255) NOT NULL, action_type VARCHAR2(255) NOT NULL, inputs VARCHAR2(1024), target_service VARCHAR2(255), target_component VARCHAR2(255), default_timeout NUMBER(10) NOT NULL, description VARCHAR2(1024), target_type VARCHAR2(255), PRIMARY KEY (action_name));
-CREATE TABLE ambari.requestschedule (schedule_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, request_context VARCHAR2(255), status VARCHAR2(255), target_type VARCHAR2(255), target_name VARCHAR2(255) NOT NULL, target_service VARCHAR2(255) NOT NULL, target_component VARCHAR2(255), batch_requests_by_host char check (batch_requests_by_host in ('FALSE','TRUE')), batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user VARCHAR2(255), create_timestamp NUMBER(19), update_user VARCHAR2(255), update_timestamp NUMBER(19), minutes VARCHAR2(10), hours VARCHAR2(10), days_of_month VARCHAR2(10), month VARCHAR2(10), day_of_week VARCHAR2(10), yearToSchedule VARCHAR2(10), startTime NUMBER(19), endTime NUMBER(19), last_execution_status VARCHAR2(255), PRIMARY KEY(schedule_id));
-CREATE TABLE ambari.requestschedulebatchhost (schedule_id NUMBER(19), batch_id NUMBER(19), host_name VARCHAR2(255), batch_name VARCHAR2(255), PRIMARY KEY(schedule_id, batch_id, host_name));
-
+CREATE TABLE ambari.requestschedule (schedule_id NUMBER(19), cluster_id NUMBER(19) NOT NULL, description VARCHAR2(255), status VARCHAR2(255), batch_separation_minutes smallint, batch_toleration_limit smallint, create_user VARCHAR2(255), create_timestamp NUMBER(19), update_user VARCHAR2(255), update_timestamp NUMBER(19), minutes VARCHAR2(10), hours VARCHAR2(10), days_of_month VARCHAR2(10), month VARCHAR2(10), day_of_week VARCHAR2(10), yearToSchedule VARCHAR2(10), startTime VARCHAR2(50), endTime VARCHAR2(50), last_execution_status VARCHAR2(255), PRIMARY KEY(schedule_id));
+CREATE TABLE ambari.requestschedulebatchrequest (schedule_id NUMBER(19), batch_id NUMBER(19), request_id NUMBER(19), request_type VARCHAR2(255), request_uri VARCHAR2(1024), request_body VARCHAR2(4000), request_status VARCHAR2(255), return_code smallint, return_message VARCHAR2(255), PRIMARY KEY(schedule_id, batch_id));
 
 ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, ldap_user);
 ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
@@ -73,9 +72,7 @@ ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconf
 ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
 ALTER TABLE confgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);
 ALTER TABLE confgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
-
+ALTER TABLE ambari.requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
 
 INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1);

+ 4 - 5
ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql

@@ -110,11 +110,11 @@ GRANT ALL PRIVILEGES ON TABLE ambari.configgrouphostmapping TO :username;
 CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCHAR(32) NOT NULL, inputs VARCHAR(1000), target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
 GRANT ALL PRIVILEGES ON TABLE ambari.action TO :username;
 
-CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id bigint NOT NULL, description varchar(255), status varchar(255), batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
 GRANT ALL PRIVILEGES ON TABLE ambari.requestschedule TO :username;
 
-CREATE TABLE ambari.requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
-GRANT ALL PRIVILEGES ON TABLE ambari.requestschedulebatchhost TO :username;
+CREATE TABLE ambari.requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body varchar(4000), request_status varchar(255), return_code smallint, return_message varchar(255), PRIMARY KEY(schedule_id, batch_id));
+GRANT ALL PRIVILEGES ON TABLE ambari.requestschedulebatchrequest TO :username;
 
 --------altering tables by creating foreign keys----------
 ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
@@ -144,8 +144,7 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclus
 ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
+ALTER TABLE ambari.requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
 
 ---------inserting some data-----------
 BEGIN;

+ 3 - 4
ambari-server/src/main/resources/Ambari-DDL-Postgres-REMOTE-CREATE.sql

@@ -45,8 +45,8 @@ CREATE TABLE ambari.ambari_sequences (sequence_name VARCHAR(255) PRIMARY KEY, "v
 CREATE TABLE ambari.configgroup (group_id BIGINT, cluster_id BIGINT NOT NULL, group_name VARCHAR(255) NOT NULL, tag VARCHAR(1024) NOT NULL, description VARCHAR(1024), create_timestamp BIGINT NOT NULL, PRIMARY KEY(group_id));
 CREATE TABLE ambari.confgroupclusterconfigmapping (config_group_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, config_type VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, user_name VARCHAR(255) DEFAULT '_db', create_timestamp BIGINT NOT NULL, PRIMARY KEY(config_group_id, cluster_id, config_type));
 CREATE TABLE ambari.configgrouphostmapping (config_group_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY(config_group_id, host_name));
-CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, request_context varchar(255), status varchar(255), target_type varchar(255), target_name varchar(255) NOT NULL, target_service varchar(255) NOT NULL, target_component varchar(255), batch_requests_by_host boolean, batch_host_count smallint, batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime bigint, endTime bigint, PRIMARY KEY(schedule_id));
-CREATE TABLE ambari.requestschedulebatchhost (schedule_id bigint, batch_id bigint, host_name varchar(255), batch_name varchar(255), PRIMARY KEY(schedule_id, batch_id, host_name));
+CREATE TABLE ambari.requestschedule (schedule_id bigint, cluster_id BIGINT NOT NULL, description varchar(255), status varchar(255), batch_separation_minutes smallint, batch_toleration_limit smallint, create_user varchar(255), create_timestamp bigint, update_user varchar(255), update_timestamp bigint, minutes varchar(10), hours varchar(10), days_of_month varchar(10), month varchar(10), day_of_week varchar(10), yearToSchedule varchar(10), startTime varchar(50), endTime varchar(50), last_execution_status varchar(255), PRIMARY KEY(schedule_id));
+CREATE TABLE ambari.requestschedulebatchrequest (schedule_id bigint, batch_id bigint, request_id bigint, request_type varchar(255), request_uri varchar(1024), request_body varchar(4000), request_status varchar(255), return_code smallint, return_message varchar(255), PRIMARY KEY(schedule_id, batch_id));
 
 ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
 ALTER TABLE ambari.clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
@@ -75,8 +75,7 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclus
 ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclusterconfigmapping_group_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.requestschedulebatchhost ADD CONSTRAINT FK_requestschedulebatchhost_schedule FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
+ALTER TABLE ambari.requestschedulebatchrequest ADD CONSTRAINT FK_requestschedulebatchrequest_schedule_id FOREIGN KEY (schedule_id) REFERENCES ambari.requestschedule (schedule_id);
 
 BEGIN;
 

+ 1 - 1
ambari-server/src/main/resources/META-INF/persistence.xml

@@ -39,7 +39,7 @@
     <class>org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity</class>
     <class>org.apache.ambari.server.orm.entities.ActionEntity</class>
     <class>org.apache.ambari.server.orm.entities.RequestScheduleEntity</class>
-    <class>org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity</class>
+    <class>org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntity</class>
 
     <properties>
       <!--<property name="javax.persistence.jdbc.url" value="jdbc:postgresql://localhost/ambari" />-->

+ 4 - 0
ambari-server/src/main/resources/key_properties.json

@@ -102,5 +102,9 @@
   "ConfigGroup": {
     "Cluster": "ConfigGroup/cluster_name",
     "ConfigGroup": "ConfigGroup/id"
+  },
+  "RequestSchedule" : {
+    "Cluster": "RequestSchedule/cluster_name",
+    "RequestSchedule": "RequestSchedule/id"
   }
 }
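
A note on how these mappings are used: when a provider is constructed, the key property ids for its Resource.Type are resolved through PropertyHelper, the same call the new resource provider test makes below. A fragment, assuming the Map-returning signature used elsewhere in this patch:

    import java.util.Map;
    import org.apache.ambari.server.controller.spi.Resource;
    import org.apache.ambari.server.controller.utilities.PropertyHelper;

    // Illustrative fragment: resolving the key properties registered above.
    Map<Resource.Type, String> keyIds =
        PropertyHelper.getKeyPropertyIds(Resource.Type.RequestSchedule);
    // keyIds.get(Resource.Type.Cluster)          yields "RequestSchedule/cluster_name"
    // keyIds.get(Resource.Type.RequestSchedule)  yields "RequestSchedule/id"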

+ 9 - 0
ambari-server/src/main/resources/properties.json

@@ -105,6 +105,15 @@
         "Requests/progress_percent",
         "_"
     ],
+    "RequestSchedule" : [
+        "RequestSchedule/id",
+        "RequestSchedule/cluster_name",
+        "RequestSchedule/description",
+        "RequestSchedule/status",
+        "RequestSchedule/batch",
+        "RequestSchedule/schedule",
+        "_"
+    ],
     "Task":[
         "Tasks/id",
         "Tasks/request_id",

+ 5 - 0
ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java

@@ -45,6 +45,9 @@ import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.configgroup.ConfigGroupImpl;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.ambari.server.state.host.HostImpl;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.scheduler.RequestExecutionImpl;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -275,6 +278,8 @@ public class AgentResourceTest extends JerseyTest {
           Config.class, ConfigImpl.class).build(ConfigFactory.class));
       install(new FactoryModuleBuilder().implement(
         ConfigGroup.class, ConfigGroupImpl.class).build(ConfigGroupFactory.class));
+      install(new FactoryModuleBuilder().implement(RequestExecution.class,
+        RequestExecutionImpl.class).build(RequestExecutionFactory.class));
       install(new FactoryModuleBuilder().build(StageFactory.class));
       install(new FactoryModuleBuilder().build(HostRoleCommandFactory.class));
     }
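
The binding added above is standard Guice assisted injection: FactoryModuleBuilder synthesizes a RequestExecutionFactory whose create methods return RequestExecutionImpl instances, with the caller-supplied constructor arguments typically marked @Assisted. A minimal, self-contained sketch of the pattern using illustrative names (not Ambari's classes):

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import com.google.inject.Injector;
    import com.google.inject.assistedinject.Assisted;
    import com.google.inject.assistedinject.FactoryModuleBuilder;

    interface Widget {}
    interface WidgetFactory { Widget create(String name); } // Guice implements this

    class WidgetImpl implements Widget {
      private final String name;
      @Inject
      WidgetImpl(@Assisted String name) { this.name = name; } // @Assisted = caller-supplied
    }

    public class AssistedInjectSketch {
      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new AbstractModule() {
          @Override protected void configure() {
            install(new FactoryModuleBuilder()
                .implement(Widget.class, WidgetImpl.class)
                .build(WidgetFactory.class));
          }
        });
        Widget w = injector.getInstance(WidgetFactory.class).create("demo");
        System.out.println(w);
      }
    }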

+ 404 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestScheduleResourceProviderTest.java

@@ -0,0 +1,404 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.RequestScheduleResponse;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.scheduler.Batch;
+import org.apache.ambari.server.state.scheduler.BatchRequest;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.scheduler.Schedule;
+import org.easymock.Capture;
+import org.easymock.IAnswer;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+public class RequestScheduleResourceProviderTest {
+
+  RequestScheduleResourceProvider getResourceProvider
+    (AmbariManagementController managementController) {
+
+    Resource.Type type = Resource.Type.RequestSchedule;
+
+    return (RequestScheduleResourceProvider)
+      AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController
+      );
+  }
+
+  @Test
+  public void testCreateRequestSchedule() throws Exception {
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    RequestExecutionFactory executionFactory = createNiceMock
+      (RequestExecutionFactory.class);
+    RequestExecution requestExecution = createNiceMock(RequestExecution.class);
+
+    expect(managementController.getClusters()).andReturn(clusters);
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+    expect(managementController.getRequestExecutionFactory()).andReturn
+      (executionFactory);
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+
+    Capture<Cluster> clusterCapture = new Capture<Cluster>();
+    Capture<Batch> batchCapture = new Capture<Batch>();
+    Capture<Schedule> scheduleCapture = new Capture<Schedule>();
+
+    expect(executionFactory.createNew(capture(clusterCapture),
+      capture(batchCapture), capture(scheduleCapture))).andReturn(requestExecution);
+
+    replay(managementController, clusters, cluster, executionFactory,
+      requestExecution, response);
+
+    RequestScheduleResourceProvider resourceProvider = getResourceProvider
+      (managementController);
+
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_DESC_PROPERTY_ID, "some description");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_DAY_OF_WEEK_PROPERTY_ID, "MON");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_MINUTES_PROPERTY_ID, "2");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_END_TIME_PROPERTY_ID, "2013-11-18T14:29:29-08:00");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_DAYS_OF_MONTH_PROPERTY_ID, "*");
+
+    HashSet<Map<String, Object>> batch = new HashSet<Map<String, Object>>();
+    Map<String, Object> batchSettings = new HashMap<String, Object>();
+    batchSettings.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_SEPARATION_PROPERTY_ID, "15");
+
+    Map<String, Object> batchRequests = new HashMap<String, Object>();
+    HashSet<Map<String, Object>> requestSet = new HashSet<Map<String, Object>>();
+
+    Map<String, Object> request1 = new HashMap<String, Object>();
+    Map<String, Object> request2 = new HashMap<String, Object>();
+
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_TYPE_PROPERTY_ID, BatchRequest.Type.PUT.name());
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_ORDER_ID_PROPERTY_ID, "20");
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_URI_PROPERTY_ID, "SomeUpdateUri");
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_BODY_PROPERTY_ID, "data1");
+
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_TYPE_PROPERTY_ID, BatchRequest.Type.DELETE.name());
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_ORDER_ID_PROPERTY_ID, "22");
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_URI_PROPERTY_ID, "SomeDeleteUri");
+
+    requestSet.add(request1);
+    requestSet.add(request2);
+
+    batchRequests.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_REQUESTS_PROPERTY_ID, requestSet);
+
+    batch.add(batchSettings);
+    batch.add(batchRequests);
+
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_PROPERTY_ID, batch);
+
+    propertySet.add(properties);
+    Request request = PropertyHelper.getCreateRequest(propertySet, null);
+    resourceProvider.createResources(request);
+
+    verify(managementController, clusters, cluster, executionFactory,
+      requestExecution, response);
+
+    List<BatchRequest> testRequests = batchCapture.getValue().getBatchRequests();
+    Assert.assertNotNull(testRequests);
+    BatchRequest deleteReq = null;
+    BatchRequest putReq = null;
+    for (BatchRequest testBatchRequest : testRequests) {
+      if (testBatchRequest.getType().equals(BatchRequest.Type.DELETE.name())) {
+        deleteReq = testBatchRequest;
+      } else {
+        putReq = testBatchRequest;
+      }
+    }
+    Assert.assertNotNull(deleteReq);
+    Assert.assertNotNull(putReq);
+    Assert.assertEquals("data1", putReq.getBody());
+    Assert.assertNull(deleteReq.getBody());
+  }
+
+  @Test
+  public void testUpdateRequestSchedule() throws Exception {
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    final RequestExecution requestExecution = createNiceMock(RequestExecution.class);
+    RequestScheduleResponse requestScheduleResponse = createNiceMock
+      (RequestScheduleResponse.class);
+
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+
+    expect(requestExecution.getId()).andReturn(25L).anyTimes();
+    expect(requestExecution.convertToResponse()).andReturn
+      (requestScheduleResponse).anyTimes();
+    expect(requestScheduleResponse.getId()).andReturn(25L).anyTimes();
+    expect(requestScheduleResponse.getClusterName()).andReturn("Cluster100")
+      .anyTimes();
+
+    expect(cluster.getAllRequestExecutions()).andStubAnswer(new IAnswer<Map<Long, RequestExecution>>() {
+      @Override
+      public Map<Long, RequestExecution> answer() throws Throwable {
+        Map<Long, RequestExecution> requestExecutionMap = new HashMap<Long,
+          RequestExecution>();
+        requestExecutionMap.put(requestExecution.getId(), requestExecution);
+        return requestExecutionMap;
+      }
+    });
+
+    replay(managementController, clusters, cluster, requestExecution,
+      response, requestScheduleResponse);
+
+    RequestScheduleResourceProvider resourceProvider = getResourceProvider
+      (managementController);
+
+    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_DESC_PROPERTY_ID, "some description");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_DAY_OF_WEEK_PROPERTY_ID, "MON");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_MINUTES_PROPERTY_ID, "2");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_END_TIME_PROPERTY_ID, "2013-11-18T14:29:29-08:00");
+    properties.put(RequestScheduleResourceProvider
+      .SCHEDULE_DAYS_OF_MONTH_PROPERTY_ID, "*");
+
+    HashSet<Map<String, Object>> batch = new HashSet<Map<String, Object>>();
+    Map<String, Object> batchSettings = new HashMap<String, Object>();
+    batchSettings.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_SEPARATION_PROPERTY_ID, "15");
+
+    Map<String, Object> batchRequests = new HashMap<String, Object>();
+    HashSet<Map<String, Object>> requestSet = new HashSet<Map<String, Object>>();
+
+    Map<String, Object> request1 = new HashMap<String, Object>();
+    Map<String, Object> request2 = new HashMap<String, Object>();
+
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_TYPE_PROPERTY_ID, BatchRequest.Type.PUT.name());
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_ORDER_ID_PROPERTY_ID, "20");
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_URI_PROPERTY_ID, "SomeUpdateUri");
+    request1.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_BODY_PROPERTY_ID, "data1");
+
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_TYPE_PROPERTY_ID, BatchRequest.Type.DELETE.name());
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_ORDER_ID_PROPERTY_ID, "22");
+    request2.put(RequestScheduleResourceProvider
+      .BATCH_REQUEST_URI_PROPERTY_ID, "SomeDeleteUri");
+
+    requestSet.add(request1);
+    requestSet.add(request2);
+
+    batchRequests.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_REQUESTS_PROPERTY_ID, requestSet);
+
+    batch.add(batchSettings);
+    batch.add(batchRequests);
+
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_BATCH_PROPERTY_ID, batch);
+
+    Map<String, String> mapRequestProps = new HashMap<String, String>();
+    mapRequestProps.put("context", "Called from a test");
+
+    Request request = PropertyHelper.getUpdateRequest(properties, mapRequestProps);
+    Predicate predicate = new PredicateBuilder().property
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID)
+      .equals("Cluster100").and().property(RequestScheduleResourceProvider
+        .REQUEST_SCHEDULE_ID_PROPERTY_ID).equals(25L).toPredicate();
+
+    resourceProvider.updateResources(request, predicate);
+
+    verify(managementController, clusters, cluster, requestExecution,
+      response, requestScheduleResponse);
+  }
+
+  @Test
+  public void testGetRequestSchedule() throws Exception {
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    final RequestExecution requestExecution = createNiceMock(RequestExecution.class);
+    RequestScheduleResponse requestScheduleResponse = createNiceMock
+      (RequestScheduleResponse.class);
+
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+
+    expect(requestExecution.getId()).andReturn(25L).anyTimes();
+    expect(requestExecution.getStatus()).andReturn(RequestExecution.Status
+      .SCHEDULED.name()).anyTimes();
+    expect(requestExecution.convertToResponse()).andReturn
+      (requestScheduleResponse).anyTimes();
+    expect(requestScheduleResponse.getId()).andReturn(25L).anyTimes();
+    expect(requestScheduleResponse.getClusterName()).andReturn("Cluster100")
+      .anyTimes();
+
+    expect(cluster.getAllRequestExecutions()).andStubAnswer(new IAnswer<Map<Long, RequestExecution>>() {
+      @Override
+      public Map<Long, RequestExecution> answer() throws Throwable {
+        Map<Long, RequestExecution> requestExecutionMap = new HashMap<Long,
+          RequestExecution>();
+        requestExecutionMap.put(requestExecution.getId(), requestExecution);
+        return requestExecutionMap;
+      }
+    });
+
+    replay(managementController, clusters, cluster, requestExecution,
+      response, requestScheduleResponse);
+
+    RequestScheduleResourceProvider resourceProvider = getResourceProvider
+      (managementController);
+
+    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_DESC_PROPERTY_ID, "some description");
+
+    Set<String> propertyIds = new HashSet<String>();
+    propertyIds.add(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID);
+    propertyIds.add(RequestScheduleResourceProvider
+      .REQUEST_SCHEDULE_ID_PROPERTY_ID);
+
+    Request request = PropertyHelper.getReadRequest(propertyIds);
+
+    // Read by id
+    Predicate predicate = new PredicateBuilder().property
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID)
+      .equals("Cluster100").and().property(RequestScheduleResourceProvider
+        .REQUEST_SCHEDULE_ID_PROPERTY_ID).equals(25L).toPredicate();
+
+    Set<Resource> resources = resourceProvider.getResources(request,
+      predicate);
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(25L, resources.iterator().next().getPropertyValue
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_ID_PROPERTY_ID));
+
+    // Read all
+    predicate = new PredicateBuilder().property
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID)
+      .equals("Cluster100").toPredicate();
+
+    resources = resourceProvider.getResources(request, predicate);
+
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals(25L, resources.iterator().next().getPropertyValue
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_ID_PROPERTY_ID));
+
+    verify(managementController, clusters, cluster, requestExecution,
+      response, requestScheduleResponse);
+  }
+
+  @Test
+  public void testDeleteRequestSchedule() throws Exception {
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
+
+    cluster.deleteRequestExecution(1L);
+
+    replay(managementController, clusters, cluster);
+
+    RequestScheduleResourceProvider resourceProvider = getResourceProvider
+      (managementController);
+
+    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
+
+    ((ObservableResourceProvider) resourceProvider).addObserver(observer);
+
+    Predicate predicate = new PredicateBuilder().property
+      (RequestScheduleResourceProvider.REQUEST_SCHEDULE_CLUSTER_NAME_PROPERTY_ID)
+      .equals("Cluster100").and().property(RequestScheduleResourceProvider
+        .REQUEST_SCHEDULE_ID_PROPERTY_ID).equals(1L).toPredicate();
+
+    resourceProvider.deleteResources(predicate);
+
+    ResourceProviderEvent lastEvent = observer.getLastEvent();
+    Assert.assertNotNull(lastEvent);
+    Assert.assertEquals(Resource.Type.RequestSchedule, lastEvent.getResourceType());
+    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
+    Assert.assertEquals(predicate, lastEvent.getPredicate());
+    Assert.assertNull(lastEvent.getRequest());
+
+    verify(managementController, clusters, cluster);
+  }
+}

+ 0 - 2
ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java

@@ -21,13 +21,11 @@ package org.apache.ambari.server.orm;
 import com.google.inject.AbstractModule;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.ControllerModule;
-
 import java.util.Properties;
 
 public class InMemoryDefaultTestModule extends AbstractModule {
   Properties properties = new Properties();
 
-
   @Override
   protected void configure() {
     properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");

+ 26 - 23
ambari-server/src/test/java/org/apache/ambari/server/orm/dao/RequestScheduleDAOTest.java

@@ -26,8 +26,9 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.RequestScheduleBatchHostEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntity;
 import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import org.apache.ambari.server.state.scheduler.BatchRequest;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,7 +40,10 @@ public class RequestScheduleDAOTest {
   private HostDAO hostDAO;
   private ClusterDAO clusterDAO;
   private RequestScheduleDAO requestScheduleDAO;
-  private RequestScheduleBatchHostDAO batchHostDAO;
+  private RequestScheduleBatchRequestDAO batchRequestDAO;
+  private String testUri = "http://localhost/blah";
+  private String testBody = "ValidJson";
+  private String testType = BatchRequest.Type.POST.name();
 
   @Before
   public void setup() throws Exception {
@@ -50,7 +54,7 @@ public class RequestScheduleDAOTest {
     hostDAO = injector.getInstance(HostDAO.class);
     clusterDAO = injector.getInstance(ClusterDAO.class);
     requestScheduleDAO = injector.getInstance(RequestScheduleDAO.class);
-    batchHostDAO = injector.getInstance(RequestScheduleBatchHostDAO.class);
+    batchRequestDAO = injector.getInstance(RequestScheduleBatchRequestDAO.class);
   }
 
   @After
@@ -67,13 +71,7 @@ public class RequestScheduleDAOTest {
 
     scheduleEntity.setClusterEntity(clusterEntity);
     scheduleEntity.setClusterId(clusterEntity.getClusterId());
-    scheduleEntity.setRequestContext("Test");
     scheduleEntity.setStatus("SCHEDULED");
-    scheduleEntity.setTargetType("ACTION");
-    scheduleEntity.setTargetName("REBALANCE");
-    scheduleEntity.setTargetService("HDFS");
-    scheduleEntity.setTargetComponent("DATANODE");
-    scheduleEntity.setBatchRequestByHost(false);
     scheduleEntity.setMinutes("30");
     scheduleEntity.setHours("12");
     scheduleEntity.setDayOfWeek("*");
@@ -87,17 +85,21 @@ public class RequestScheduleDAOTest {
     hostEntity.setOsType("centOS");
     hostDAO.create(hostEntity);
 
-    RequestScheduleBatchHostEntity batchHostEntity = new
-      RequestScheduleBatchHostEntity();
+    RequestScheduleBatchRequestEntity batchRequestEntity = new
+      RequestScheduleBatchRequestEntity();
 
-    batchHostEntity.setBatchId(1L);
-    batchHostEntity.setScheduleId(scheduleEntity.getScheduleId());
-    batchHostEntity.setRequestScheduleEntity(scheduleEntity);
-    batchHostEntity.setHostName(hostEntity.getHostName());
-    batchHostEntity.setRequestScheduleEntity(scheduleEntity);
-    batchHostDAO.create(batchHostEntity);
+    batchRequestEntity.setBatchId(1L);
+    batchRequestEntity.setScheduleId(scheduleEntity.getScheduleId());
+    batchRequestEntity.setRequestScheduleEntity(scheduleEntity);
+    batchRequestEntity.setRequestType(testType);
+    batchRequestEntity.setRequestUri(testUri);
+    batchRequestEntity.setRequestBody(testBody);
 
-    scheduleEntity.getRequestScheduleBatchHostEntities().add(batchHostEntity);
+    batchRequestDAO.create(batchRequestEntity);
+
+    scheduleEntity.getRequestScheduleBatchRequestEntities().add
+      (batchRequestEntity);
     scheduleEntity = requestScheduleDAO.merge(scheduleEntity);
 
     return scheduleEntity;
@@ -109,12 +111,13 @@ public class RequestScheduleDAOTest {
 
     Assert.assertTrue(scheduleEntity.getScheduleId() > 0);
     Assert.assertEquals("SCHEDULED", scheduleEntity.getStatus());
-    Assert.assertEquals("REBALANCE", scheduleEntity.getTargetName());
-    Assert.assertEquals("HDFS", scheduleEntity.getTargetService());
-    Assert.assertEquals(false, scheduleEntity.getIsBatchRequestByHost());
     Assert.assertEquals("12", scheduleEntity.getHours());
-    Assert.assertEquals("h1", scheduleEntity
-      .getRequestScheduleBatchHostEntities().iterator().next().getHostName());
+    RequestScheduleBatchRequestEntity batchRequestEntity = scheduleEntity
+      .getRequestScheduleBatchRequestEntities().iterator().next();
+    Assert.assertNotNull(batchRequestEntity);
+    Assert.assertEquals(testUri, batchRequestEntity.getRequestUri());
+    Assert.assertEquals(testType, batchRequestEntity.getRequestType());
+    Assert.assertEquals(testBody, batchRequestEntity.getRequestBody());
   }
 
   @Test
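
For orientation: the batch request rows created in this test ride on a composite primary key (schedule_id, batch_id), per the DDL above. A generic, self-contained JPA sketch of an entity with that key shape; the names here are illustrative, while the patch's real classes are RequestScheduleBatchRequestEntity and RequestScheduleBatchRequestEntityPK:

    import java.io.Serializable;
    import java.util.Objects;
    import javax.persistence.Column;
    import javax.persistence.Entity;
    import javax.persistence.Id;
    import javax.persistence.IdClass;
    import javax.persistence.Table;

    // Composite key class: Serializable, with field names matching the
    // entity's @Id fields and value-based equals/hashCode.
    class BatchRequestKey implements Serializable {
      Long scheduleId;
      Long batchId;
      @Override public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof BatchRequestKey)) return false;
        BatchRequestKey k = (BatchRequestKey) o;
        return Objects.equals(scheduleId, k.scheduleId)
            && Objects.equals(batchId, k.batchId);
      }
      @Override public int hashCode() { return Objects.hash(scheduleId, batchId); }
    }

    @Entity
    @Table(name = "requestschedulebatchrequest")
    @IdClass(BatchRequestKey.class)
    class BatchRequestSketch {
      @Id @Column(name = "schedule_id") Long scheduleId;
      @Id @Column(name = "batch_id")    Long batchId;
      @Column(name = "request_uri")     String requestUri;
    }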

+ 110 - 0
ambari-server/src/test/java/org/apache/ambari/server/scheduler/ExecutionSchedulerTest.java

@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.scheduler;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.configuration.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.quartz.Scheduler;
+import org.quartz.impl.StdSchedulerFactory;
+import java.util.Properties;
+import static org.easymock.EasyMock.expect;
+import static org.mockito.Mockito.spy;
+import static org.powermock.api.easymock.PowerMock.createNiceMock;
+import static org.powermock.api.easymock.PowerMock.expectNew;
+import static org.powermock.api.easymock.PowerMock.expectPrivate;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ ExecutionSchedulerImpl.class })
+@PowerMockIgnore("javax.management.*")
+public class ExecutionSchedulerTest {
+
+  private Configuration configuration;
+
+  @Before
+  public void setup() throws Exception {
+    Properties properties = new Properties();
+    properties.setProperty(Configuration.EXECUTION_SCHEDULER_THREADS, "2");
+    properties.setProperty(Configuration.EXECUTION_SCHEDULER_CLUSTERED, "false");
+    properties.setProperty(Configuration.EXECUTION_SCHEDULER_CONNECTIONS, "2");
+    properties.setProperty(Configuration.SERVER_JDBC_DRIVER_KEY, "db.driver");
+    properties.setProperty(Configuration.SERVER_JDBC_URL_KEY, "db.url");
+    properties.setProperty(Configuration.SERVER_JDBC_USER_NAME_KEY, "user");
+    properties.setProperty(Configuration.SERVER_JDBC_USER_PASSWD_KEY,
+      "ambari-server/src/test/resources/password.dat");
+    properties.setProperty(Configuration.SERVER_DB_NAME_KEY, "derby");
+
+    this.configuration = new Configuration(properties);
+  }
+
+  @After
+  public void teardown() throws Exception {
+  }
+
+  @Test
+  public void testSchedulerInitialize() throws Exception {
+
+    ExecutionSchedulerImpl executionScheduler = spy(new ExecutionSchedulerImpl(configuration));
+
+    Properties actualProperties = executionScheduler
+      .getQuartzSchedulerProperties();
+
+    Assert.assertEquals("2", actualProperties.getProperty("org.quartz.threadPool.threadCount"));
+    Assert.assertEquals("2", actualProperties.getProperty("org.quartz.dataSource.myDS.maxConnections"));
+    Assert.assertEquals("false", actualProperties.getProperty("org.quartz.jobStore.isClustered"));
+    Assert.assertEquals("org.quartz.impl.jdbcjobstore.StdJDBCDelegate",
+      actualProperties.getProperty("org.quartz.jobStore.driverDelegateClass"));
+    Assert.assertEquals("select 0",
+      actualProperties.getProperty("org.quartz.dataSource.myDS.validationQuery"));
+    Assert.assertEquals(ExecutionSchedulerImpl.DEFAULT_SCHEDULER_NAME,
+      actualProperties.getProperty("org.quartz.scheduler.instanceName"));
+    Assert.assertEquals("org.quartz.simpl.SimpleThreadPool",
+      actualProperties.getProperty("org.quartz.threadPool.class"));
+  }
+
+  @Test
+  public void testSchedulerStartStop() throws Exception {
+    StdSchedulerFactory factory = createNiceMock(StdSchedulerFactory.class);
+    Scheduler scheduler = createNiceMock(Scheduler.class);
+
+    expect(factory.getScheduler()).andReturn(scheduler);
+    expectPrivate(scheduler, "start").once();
+    expectNew(StdSchedulerFactory.class).andReturn(factory);
+    expectPrivate(scheduler, "shutdown").once();
+
+    PowerMock.replay(factory, StdSchedulerFactory.class, scheduler);
+
+    ExecutionSchedulerImpl executionScheduler = new ExecutionSchedulerImpl(configuration);
+
+    executionScheduler.startScheduler();
+    executionScheduler.stopScheduler();
+
+    PowerMock.verify(factory, StdSchedulerFactory.class, scheduler);
+
+    Assert.assertTrue(executionScheduler.isInitialized());
+  }
+}
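
The properties asserted in testSchedulerInitialize are plain Quartz configuration keys, so the scheduler under test is driven entirely by the Properties object it builds. A minimal sketch of booting Quartz from such properties, with an in-memory RAMJobStore substituted for the JDBC store Ambari configures so the snippet runs without a database (the instance name below is illustrative):

    import java.util.Properties;
    import org.quartz.Scheduler;
    import org.quartz.impl.StdSchedulerFactory;

    public class QuartzBootSketch {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("org.quartz.scheduler.instanceName", "ExecutionScheduler");
        props.setProperty("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
        props.setProperty("org.quartz.threadPool.threadCount", "2");
        // In-memory store; the patch points the jobStore at a JDBC delegate instead.
        props.setProperty("org.quartz.jobStore.class", "org.quartz.simpl.RAMJobStore");

        Scheduler scheduler = new StdSchedulerFactory(props).getScheduler();
        scheduler.start();    // what startScheduler() ultimately does
        scheduler.shutdown(); // what stopScheduler() ultimately does
      }
    }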

+ 275 - 0
ambari-server/src/test/java/org/apache/ambari/server/state/RequestExecutionTest.java

@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.Transactional;
+import junit.framework.Assert;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.RequestScheduleDAO;
+import org.apache.ambari.server.orm.entities.RequestScheduleBatchRequestEntity;
+import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
+import org.apache.ambari.server.state.scheduler.Batch;
+import org.apache.ambari.server.state.scheduler.BatchRequest;
+import org.apache.ambari.server.state.scheduler.BatchSettings;
+import org.apache.ambari.server.state.scheduler.RequestExecution;
+import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
+import org.apache.ambari.server.state.scheduler.Schedule;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+public class RequestExecutionTest {
+  private Injector injector;
+  private Clusters clusters;
+  private Cluster cluster;
+  private String clusterName;
+  private AmbariMetaInfo metaInfo;
+  private RequestExecutionFactory requestExecutionFactory;
+  private RequestScheduleDAO requestScheduleDAO;
+
+  @Before
+  public void setup() throws Exception {
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    injector.getInstance(GuiceJpaInitializer.class);
+    clusters = injector.getInstance(Clusters.class);
+    metaInfo = injector.getInstance(AmbariMetaInfo.class);
+    requestExecutionFactory = injector.getInstance(RequestExecutionFactory.class);
+    requestScheduleDAO = injector.getInstance(RequestScheduleDAO.class);
+
+    metaInfo.init();
+    clusterName = "foo";
+    clusters.addCluster(clusterName);
+    cluster = clusters.getCluster(clusterName);
+    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
+    Assert.assertNotNull(cluster);
+    clusters.addHost("h1");
+    clusters.addHost("h2");
+    clusters.addHost("h3");
+    Assert.assertNotNull(clusters.getHost("h1"));
+    Assert.assertNotNull(clusters.getHost("h2"));
+    Assert.assertNotNull(clusters.getHost("h3"));
+    clusters.getHost("h1").persist();
+    clusters.getHost("h2").persist();
+    clusters.getHost("h3").persist();
+  }
+
+  @After
+  public void teardown() throws Exception {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @Transactional
+  private RequestExecution createRequestSchedule() throws Exception {
+    Batch batches = new Batch();
+    Schedule schedule = new Schedule();
+
+    BatchSettings batchSettings = new BatchSettings();
+    batchSettings.setTaskFailureToleranceLimit(10);
+    batches.setBatchSettings(batchSettings);
+
+    List<BatchRequest> batchRequests = new ArrayList<BatchRequest>();
+    BatchRequest batchRequest1 = new BatchRequest();
+    batchRequest1.setOrderId(10L);
+    batchRequest1.setType(BatchRequest.Type.DELETE);
+    batchRequest1.setUri("testUri1");
+
+    BatchRequest batchRequest2 = new BatchRequest();
+    batchRequest2.setOrderId(12L);
+    batchRequest2.setType(BatchRequest.Type.POST);
+    batchRequest2.setUri("testUri2");
+    batchRequest2.setBody("testBody");
+
+    batchRequests.add(batchRequest1);
+    batchRequests.add(batchRequest2);
+
+    batches.getBatchRequests().addAll(batchRequests);
+
+    schedule.setMinutes("10");
+    schedule.setEndTime("2014-01-01 00:00:00");
+
+    RequestExecution requestExecution = requestExecutionFactory.createNew
+      (cluster, batches, schedule);
+    requestExecution.setDescription("Test Schedule");
+
+    requestExecution.persist();
+
+    return requestExecution;
+  }
+
+  @Test
+  public void testCreateRequestSchedule() throws Exception {
+    RequestExecution requestExecution = createRequestSchedule();
+    Assert.assertNotNull(requestExecution);
+
+    RequestScheduleEntity scheduleEntity = requestScheduleDAO.findById
+      (requestExecution.getId());
+
+    Assert.assertNotNull(scheduleEntity);
+    Assert.assertEquals(requestExecution.getBatch().getBatchSettings()
+      .getTaskFailureToleranceLimit(), scheduleEntity.getBatchTolerationLimit());
+    Assert.assertEquals(2, scheduleEntity.getRequestScheduleBatchRequestEntities().size());
+    Collection<RequestScheduleBatchRequestEntity> batchRequestEntities =
+      scheduleEntity.getRequestScheduleBatchRequestEntities();
+    Assert.assertNotNull(batchRequestEntities);
+    RequestScheduleBatchRequestEntity reqEntity1 = null;
+    RequestScheduleBatchRequestEntity reqEntity2 = null;
+    for (RequestScheduleBatchRequestEntity reqEntity : batchRequestEntities) {
+      if (reqEntity.getRequestUri().equals("testUri1")) {
+        reqEntity1 = reqEntity;
+      } else if (reqEntity.getRequestUri().equals("testUri2")) {
+        reqEntity2 = reqEntity;
+      }
+    }
+    Assert.assertNotNull(reqEntity1);
+    Assert.assertNotNull(reqEntity2);
+    Assert.assertEquals(Long.valueOf(1L), reqEntity1.getBatchId());
+    Assert.assertEquals(Long.valueOf(2L), reqEntity2.getBatchId());
+    Assert.assertEquals(BatchRequest.Type.DELETE.name(), reqEntity1.getRequestType());
+    Assert.assertEquals(BatchRequest.Type.POST.name(), reqEntity2.getRequestType());
+    Assert.assertEquals(requestExecution.getSchedule().getMinutes(),
+      scheduleEntity.getMinutes());
+    Assert.assertEquals(requestExecution.getSchedule().getEndTime(),
+      scheduleEntity.getEndTime());
+  }
+
+  @Test
+  public void testUpdateRequestSchedule() throws Exception {
+    RequestExecution requestExecution = createRequestSchedule();
+    Assert.assertNotNull(requestExecution);
+    Long id = requestExecution.getId();
+    RequestScheduleEntity scheduleEntity = requestScheduleDAO.findById(id);
+    Assert.assertNotNull(scheduleEntity);
+
+    // Read from DB
+    requestExecution = requestExecutionFactory.createExisting(cluster,
+      scheduleEntity);
+
+    // Replace the batch requests with a new set
+    Batch batches = new Batch();
+
+    List<BatchRequest> batchRequests = new ArrayList<BatchRequest>();
+    BatchRequest batchRequest1 = new BatchRequest();
+    batchRequest1.setOrderId(10L);
+    batchRequest1.setType(BatchRequest.Type.PUT);
+    batchRequest1.setUri("testUri3");
+
+    BatchRequest batchRequest2 = new BatchRequest();
+    batchRequest2.setOrderId(12L);
+    batchRequest2.setType(BatchRequest.Type.POST);
+    batchRequest2.setUri("testUri4");
+    batchRequest2.setBody("testBody");
+
+    batchRequests.add(batchRequest1);
+    batchRequests.add(batchRequest2);
+
+    batches.getBatchRequests().addAll(batchRequests);
+
+    requestExecution.setBatch(batches);
+
+    // Change schedule
+    requestExecution.getSchedule().setHours("11");
+
+    // Save
+    requestExecution.persist();
+
+    scheduleEntity = requestScheduleDAO.findById(id);
+    Assert.assertNotNull(scheduleEntity);
+    Collection<RequestScheduleBatchRequestEntity> batchRequestEntities =
+      scheduleEntity.getRequestScheduleBatchRequestEntities();
+    Assert.assertNotNull(batchRequestEntities);
+    RequestScheduleBatchRequestEntity reqEntity1 = null;
+    RequestScheduleBatchRequestEntity reqEntity2 = null;
+    for (RequestScheduleBatchRequestEntity reqEntity : batchRequestEntities) {
+      if (reqEntity.getRequestUri().equals("testUri3")) {
+        reqEntity1 = reqEntity;
+      } else if (reqEntity.getRequestUri().equals("testUri4")) {
+        reqEntity2 = reqEntity;
+      }
+    }
+    Assert.assertNotNull(reqEntity1);
+    Assert.assertNotNull(reqEntity2);
+    Assert.assertEquals(Long.valueOf(1L), reqEntity1.getBatchId());
+    Assert.assertEquals(Long.valueOf(2L), reqEntity2.getBatchId());
+    Assert.assertEquals(BatchRequest.Type.PUT.name(), reqEntity1.getRequestType());
+    Assert.assertEquals(BatchRequest.Type.POST.name(), reqEntity2.getRequestType());
+    Assert.assertEquals("11", scheduleEntity.getHours());
+  }
+
+  @Test
+  public void testGetRequestSchedule() throws Exception {
+    RequestExecution requestExecution = createRequestSchedule();
+    Assert.assertNotNull(requestExecution);
+
+    RequestScheduleEntity scheduleEntity = requestScheduleDAO.findById
+      (requestExecution.getId());
+    Assert.assertNotNull(scheduleEntity);
+
+    Assert.assertNotNull(cluster.getAllRequestExecutions().get
+      (requestExecution.getId()));
+
+    Assert.assertNotNull(scheduleEntity);
+    Assert.assertEquals(requestExecution.getBatch().getBatchSettings()
+      .getTaskFailureToleranceLimit(), scheduleEntity.getBatchTolerationLimit());
+    Assert.assertEquals(2, scheduleEntity.getRequestScheduleBatchRequestEntities().size());
+    Collection<RequestScheduleBatchRequestEntity> batchRequestEntities =
+      scheduleEntity.getRequestScheduleBatchRequestEntities();
+    Assert.assertNotNull(batchRequestEntities);
+    RequestScheduleBatchRequestEntity reqEntity1 = null;
+    RequestScheduleBatchRequestEntity reqEntity2 = null;
+    for (RequestScheduleBatchRequestEntity reqEntity : batchRequestEntities) {
+      if (reqEntity.getRequestUri().equals("testUri1")) {
+        reqEntity1 = reqEntity;
+      } else if (reqEntity.getRequestUri().equals("testUri2")) {
+        reqEntity2 = reqEntity;
+      }
+    }
+    Assert.assertNotNull(reqEntity1);
+    Assert.assertNotNull(reqEntity2);
+    Assert.assertEquals(Long.valueOf(1L), reqEntity1.getBatchId());
+    Assert.assertEquals(Long.valueOf(2L), reqEntity2.getBatchId());
+    Assert.assertEquals(BatchRequest.Type.DELETE.name(), reqEntity1.getRequestType());
+    Assert.assertEquals(BatchRequest.Type.POST.name(), reqEntity2.getRequestType());
+    Assert.assertEquals(requestExecution.getSchedule().getMinutes(),
+      scheduleEntity.getMinutes());
+    Assert.assertEquals(requestExecution.getSchedule().getEndTime(),
+      scheduleEntity.getEndTime());
+  }
+
+  @Test
+  public void testDeleteRequestSchedule() throws Exception {
+    RequestExecution requestExecution = createRequestSchedule();
+    Assert.assertNotNull(requestExecution);
+
+    Long id = requestExecution.getId();
+
+    requestExecution.delete();
+
+    Assert.assertNull(requestScheduleDAO.findById(id));
+    Assert.assertNull(cluster.getAllRequestExecutions().get(id));
+  }
+}
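
The Schedule fields exercised throughout these tests (minutes, hours, days_of_month, month, day_of_week) mirror cron fields, which is presumably how they feed the Quartz scheduler. How the mapping is done inside ExecutionScheduleManager is not shown in this diff; the following is only a plausible sketch of turning such values into a Quartz cron trigger:

    import org.quartz.CronScheduleBuilder;
    import org.quartz.Trigger;
    import org.quartz.TriggerBuilder;

    public class CronTriggerSketch {
      public static void main(String[] args) {
        // Quartz cron order: seconds minutes hours day-of-month month day-of-week.
        // Here minutes = "10" and everything else is open; Quartz requires
        // day-of-week to be '?' when day-of-month is '*'.
        String cron = String.format("0 %s %s %s %s %s", "10", "*", "*", "*", "?");
        Trigger trigger = TriggerBuilder.newTrigger()
            .withIdentity("requestSchedule_25") // illustrative trigger key
            .withSchedule(CronScheduleBuilder.cronSchedule(cron))
            .build();
        System.out.println(trigger.getKey());
      }
    }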

+ 1 - 0
ambari-server/src/test/resources/password.dat

@@ -0,0 +1 @@
+bigdata