Merge branch 'trunk' into branch-dev-patch-upgrade

Nate Cole · 9 years ago
parent commit: 2eea1bfa90
100 changed files with 1564 additions and 378 deletions
  1. 1 1
      ambari-common/src/main/python/resource_management/core/logger.py
  2. 48 5
      ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
  3. 4 2
      ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
  4. 3 1
      ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
  5. 1 1
      ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
  6. 3 3
      ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
  7. 6 6
      ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
  8. 6 6
      ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
  9. 1 1
      ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
  10. 86 51
      ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
  11. 89 0
      ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
  12. 16 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
  13. 28 7
      ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
  14. 21 1
      ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
  15. 38 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
  16. 136 30
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
  17. 6 1
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
  18. 11 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
  19. 2 11
      ambari-server/src/main/java/org/apache/ambari/server/serveraction/ServerActionExecutor.java
  20. 14 0
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
  21. 71 10
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
  22. 25 11
      ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
  23. 13 4
      ambari-server/src/main/java/org/apache/ambari/server/state/services/AlertNoticeDispatchService.java
  24. 55 0
      ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
  25. 14 0
      ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
  26. 1 0
      ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
  27. 2 0
      ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
  28. 2 0
      ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
  29. 4 0
      ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
  30. 2 0
      ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
  31. 2 0
      ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
  32. 1 1
      ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
  33. 19 1
      ambari-server/src/main/resources/alert-templates.xml
  34. 0 1
      ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-env.xml
  35. 0 1
      ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
  36. 0 1
      ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml
  37. 0 1
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
  38. 0 1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
  39. 0 1
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
  40. 1 0
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
  41. 0 1
      ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
  42. 0 1
      ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml
  43. 0 1
      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
  44. 0 1
      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
  45. 0 1
      ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml
  46. 1 0
      ambari-server/src/main/resources/properties.json
  47. 25 21
      ambari-server/src/main/resources/scripts/Ambaripreupload.py
  48. 4 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
  49. 0 1
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
  50. 10 7
      ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
  51. 2 0
      ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml
  52. 16 16
      ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
  53. 38 3
      ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
  54. 12 12
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
  55. 2 0
      ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
  56. 11 9
      ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
  57. 95 0
      ambari-server/src/test/java/org/apache/ambari/server/configuration/ConfigurationTest.java
  58. 3 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  59. 2 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
  60. 24 13
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
  61. 18 17
      ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
  62. 2 0
      ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
  63. 124 10
      ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
  64. 12 0
      ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
  65. 2 0
      ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
  66. 6 6
      ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
  67. 12 2
      ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
  68. 46 0
      ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
  69. 1 0
      ambari-web/app/assets/test/tests.js
  70. 1 1
      ambari-web/app/controllers/main/admin/highAvailability/hawq/addStandby/step3_controller.js
  71. 5 0
      ambari-web/app/mappers/components_state_mapper.js
  72. 5 0
      ambari-web/app/messages.js
  73. 2 2
      ambari-web/app/models/alerts/alert_definition.js
  74. 1 0
      ambari-web/app/views.js
  75. 5 6
      ambari-web/app/views/common/configs/widgets/list_config_widget_view.js
  76. 15 4
      ambari-web/app/views/main/dashboard/widgets.js
  77. 190 0
      ambari-web/app/views/main/dashboard/widgets/hawqsegment_live.js
  78. 1 1
      ambari-web/test/controllers/installer_test.js
  79. 2 2
      ambari-web/test/controllers/main/admin/highAvailability/progress_popup_controller_test.js
  80. 2 6
      ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
  81. 1 1
      ambari-web/test/controllers/main/service/info/config_test.js
  82. 1 1
      ambari-web/test/controllers/main/service/widgets/create/step2_controller_test.js
  83. 0 12
      ambari-web/test/controllers/main/service_test.js
  84. 0 18
      ambari-web/test/controllers/wizard/step3_test.js
  85. 1 1
      ambari-web/test/controllers/wizard/step7_test.js
  86. 6 1
      ambari-web/test/controllers/wizard/step9_test.js
  87. 4 4
      ambari-web/test/mappers/server_data_mapper_test.js
  88. 5 5
      ambari-web/test/mixins/common/configs/configs_saver_test.js
  89. 1 1
      ambari-web/test/mixins/main/host/details/host_components/install_component_test.js
  90. 1 2
      ambari-web/test/models/alerts/alert_instance_test.js
  91. 1 1
      ambari-web/test/utils/form_field_test.js
  92. 2 2
      ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
  93. 1 1
      ambari-web/test/views/common/table_view_test.js
  94. 4 2
      ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
  95. 2 2
      ambari-web/test/views/main/alert_definitions_view_test.js
  96. 10 8
      ambari-web/test/views/main/dashboard/widget_test.js
  97. 69 0
      ambari-web/test/views/main/dashboard/widgets/hawqsegment_live_test.js
  98. 1 1
      ambari-web/test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test.js
  99. 2 2
      ambari-web/test/views/wizard/step5_view_test.js
  100. 23 8
      ambari-web/test/views/wizard/step9_view_test.js

+ 1 - 1
ambari-common/src/main/python/resource_management/core/logger.py

@@ -173,4 +173,4 @@ class Logger:
     if arguments_str:
       arguments_str = arguments_str[:-2]
         
-    return unicode("{0} {{{1}}}").format(name, arguments_str)
+    return unicode("{0} {{{1}}}", 'UTF-8').format(name, arguments_str)

+ 48 - 5
ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java

@@ -28,9 +28,12 @@ import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLSocketFactory;
 import javax.net.ssl.TrustManagerFactory;
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
@@ -72,17 +75,19 @@ public abstract class AbstractTimelineMetricsSink {
   protected void emitMetrics(TimelineMetrics metrics) {
     String connectUrl = getCollectorUri();
     int timeout = getTimeoutSeconds() * 1000;
+    HttpURLConnection connection = null;
     try {
       if (connectUrl == null) {
         throw new IOException("Unknown URL. " +
           "Unable to connect to metrics collector.");
       }
       String jsonData = mapper.writeValueAsString(metrics);
-      HttpURLConnection connection = connectUrl.startsWith("https") ?
+      connection = connectUrl.startsWith("https") ?
         getSSLConnection(connectUrl) : getConnection(connectUrl);
 
       connection.setRequestMethod("POST");
       connection.setRequestProperty("Content-Type", "application/json");
+      connection.setRequestProperty("Connection", "Keep-Alive");
       connection.setConnectTimeout(timeout);
       connection.setReadTimeout(timeout);
       connection.setDoOutput(true);
@@ -103,14 +108,52 @@ public abstract class AbstractTimelineMetricsSink {
           LOG.debug("Metrics posted to Collector " + connectUrl);
         }
       }
-    } catch (IOException e) {
+      cleanupInputStream(connection.getInputStream());
+    } catch (IOException ioe) {
+      StringBuilder errorMessage =
+        new StringBuilder("Unable to connect to collector, " + connectUrl + "\n");
+      try {
+        if (connection != null) {
+          errorMessage.append(cleanupInputStream(connection.getErrorStream()));
+        }
+      } catch (IOException e) {
+        //NOP
+      }
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Unable to connect to collector, " + connectUrl, e);
+        LOG.debug(errorMessage, ioe);
       } else {
-        LOG.info("Unable to connect to collector, " + connectUrl);
+        LOG.info(errorMessage);
+      }
+      throw new UnableToConnectException(ioe).setConnectUrl(connectUrl);
+    }
+  }
+
+  /**
+   * Cleans up and closes an input stream
+   * see http://docs.oracle.com/javase/6/docs/technotes/guides/net/http-keepalive.html
+   * @param is the InputStream to clean up
+   * @return string read from the InputStream
+   * @throws IOException
+   */
+  private String cleanupInputStream(InputStream is) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    if (is != null) {
+      try (
+        InputStreamReader isr = new InputStreamReader(is);
+        BufferedReader br = new BufferedReader(isr)
+      ) {
+        // read the response body
+        String line;
+        while ((line = br.readLine()) != null) {
+          if (LOG.isDebugEnabled()) {
+            sb.append(line);
+          }
+        }
+      } finally {
+        is.close();
       }
-      throw new UnableToConnectException(e).setConnectUrl(connectUrl);
     }
+    return sb.toString();
   }
 
   // Get a connection
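
Aside on the keep-alive handling introduced above: per the Oracle note linked in the Javadoc, the JVM only returns an HttpURLConnection's socket to its keep-alive pool once the response (or error) body has been read to completion, which is what cleanupInputStream ensures. Below is a minimal, self-contained sketch of the same pattern; the class name and endpoint are illustrative, not part of this commit.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class KeepAliveDrainSketch {
      // Reads a stream to completion so the underlying socket can be
      // returned to the JVM's keep-alive connection pool and reused.
      static String drain(InputStream is) throws IOException {
        if (is == null) {
          return "";
        }
        StringBuilder sb = new StringBuilder();
        try (BufferedReader br = new BufferedReader(new InputStreamReader(is))) {
          String line;
          while ((line = br.readLine()) != null) {
            sb.append(line);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) throws IOException {
        // Hypothetical endpoint; any HTTP URL demonstrates the pattern.
        HttpURLConnection conn =
            (HttpURLConnection) new URL("http://example.org/").openConnection();
        try {
          System.out.println(drain(conn.getInputStream()));
        } catch (IOException ioe) {
          // On failure, drain the error stream too; otherwise the socket is discarded.
          drain(conn.getErrorStream());
          throw ioe;
        }
      }
    }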

+ 4 - 2
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java

@@ -55,8 +55,8 @@ import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 public class ApplicationHistoryServer extends CompositeService {
 
   public static final int SHUTDOWN_HOOK_PRIORITY = 30;
-  private static final Log LOG = LogFactory
-    .getLog(ApplicationHistoryServer.class);
+  private static final Log LOG =
+    LogFactory.getLog(ApplicationHistoryServer.class);
 
   ApplicationHistoryClientService ahsClientService;
   ApplicationHistoryManager historyManager;
@@ -172,6 +172,8 @@ public class ApplicationHistoryServer extends CompositeService {
     LOG.info("Instantiating AHSWebApp at " + bindAddress);
     try {
       Configuration conf = metricConfiguration.getMetricsConf();
+      conf.set("hadoop.http.max.threads", String.valueOf(metricConfiguration
+        .getTimelineMetricsServiceHandlerThreadCount()));
       HttpConfig.Policy policy = HttpConfig.Policy.valueOf(
         conf.get(TimelineMetricConfiguration.TIMELINE_SERVICE_HTTP_POLICY,
           HttpConfig.Policy.HTTP_ONLY.name()));

+ 3 - 1
ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java

@@ -24,6 +24,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.ProtocolException;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Implements MetricsSender and provides a way of pushing metrics to application metrics history service using REST
@@ -65,7 +66,8 @@ public class RestMetricsSender implements MetricsSender {
       responseString = svc.send(payload);
 
       timer.stop();
-      LOG.info("http response time: " + timer.elapsedMillis() + " ms");
+      LOG.info("http response time: " + timer.elapsed(TimeUnit.MILLISECONDS)
+        + " ms");
 
       if (responseString.length() > 0) {
         LOG.debug("POST response from server: " + responseString);

+ 1 - 1
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java

@@ -58,7 +58,7 @@ public class ApplicationHistoryStoreTestUtils {
       ApplicationAttemptId appAttemptId) throws IOException {
     store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
       appAttemptId, appAttemptId.toString(), 0,
-      ContainerId.newInstance(appAttemptId, 1)));
+      ContainerId.newContainerId(appAttemptId, 1)));
   }
 
   protected void writeApplicationAttemptFinishData(
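
The ContainerId change here, repeated throughout the test diffs below, is a straight API rename: ContainerId.newInstance(attemptId, id) was deprecated in later Hadoop 2.x releases in favor of newContainerId. A minimal sketch, assuming the YARN API jars on the classpath:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;

    public class ContainerIdRenameSketch {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(0, 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        // newContainerId(...) replaces the deprecated newInstance(...)
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        System.out.println(containerId);
      }
    }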

+ 3 - 3
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java

@@ -168,7 +168,7 @@ public class TestApplicationHistoryClientService extends
     writeApplicationStartData(appId);
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     writeContainerStartData(containerId);
     writeContainerFinishData(containerId);
     writeApplicationFinishData(appId);
@@ -189,8 +189,8 @@ public class TestApplicationHistoryClientService extends
     writeApplicationStartData(appId);
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
-    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
+    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 2);
     writeContainerStartData(containerId);
     writeContainerFinishData(containerId);
     writeContainerStartData(containerId1);

+ 6 - 6
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java

@@ -94,7 +94,7 @@ public class TestFileSystemApplicationHistoryStore extends
         }
         // write container history data
         for (int k = 1; k <= num; ++k) {
-          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
           writeContainerStartData(containerId);
           if (missingContainer && k == num) {
             continue;
@@ -144,7 +144,7 @@ public class TestFileSystemApplicationHistoryStore extends
         // read container history data
         Assert.assertEquals(num, store.getContainers(appAttemptId).size());
         for (int k = 1; k <= num; ++k) {
-          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
           ContainerHistoryData containerData = store.getContainer(containerId);
           Assert.assertNotNull(containerData);
           Assert.assertEquals(Priority.newInstance(containerId.getId()),
@@ -159,7 +159,7 @@ public class TestFileSystemApplicationHistoryStore extends
         ContainerHistoryData masterContainer =
             store.getAMContainer(appAttemptId);
         Assert.assertNotNull(masterContainer);
-        Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+        Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
           masterContainer.getContainerId());
       }
     }
@@ -186,7 +186,7 @@ public class TestFileSystemApplicationHistoryStore extends
       Assert.assertTrue(e.getMessage().contains("is not opened"));
     }
     // write container history data
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     try {
       writeContainerStartData(containerId);
       Assert.fail();
@@ -209,8 +209,8 @@ public class TestFileSystemApplicationHistoryStore extends
     writeApplicationStartData(appId);
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
-    for (int i = 1; i <= 100000; ++i) {
-      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+    for (int i = 1; i <= 1000; ++i) {
+      ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
       writeContainerStartData(containerId);
       writeContainerFinishData(containerId);
     }

+ 6 - 6
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java

@@ -137,7 +137,7 @@ public class TestMemoryApplicationHistoryStore extends
     ApplicationId appId = ApplicationId.newInstance(0, 1);
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     try {
       writeContainerFinishData(containerId);
       Assert.fail();
@@ -149,14 +149,14 @@ public class TestMemoryApplicationHistoryStore extends
     writeApplicationAttemptStartData(appAttemptId);
     int numContainers = 5;
     for (int i = 1; i <= numContainers; ++i) {
-      containerId = ContainerId.newInstance(appAttemptId, i);
+      containerId = ContainerId.newContainerId(appAttemptId, i);
       writeContainerStartData(containerId);
       writeContainerFinishData(containerId);
     }
     Assert
       .assertEquals(numContainers, store.getContainers(appAttemptId).size());
     for (int i = 1; i <= numContainers; ++i) {
-      containerId = ContainerId.newInstance(appAttemptId, i);
+      containerId = ContainerId.newContainerId(appAttemptId, i);
       ContainerHistoryData data = store.getContainer(containerId);
       Assert.assertNotNull(data);
       Assert.assertEquals(Priority.newInstance(containerId.getId()),
@@ -165,11 +165,11 @@ public class TestMemoryApplicationHistoryStore extends
     }
     ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId);
     Assert.assertNotNull(masterContainer);
-    Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+    Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
       masterContainer.getContainerId());
     writeApplicationAttemptFinishData(appAttemptId);
     // Write again
-    containerId = ContainerId.newInstance(appAttemptId, 1);
+    containerId = ContainerId.newContainerId(appAttemptId, 1);
     try {
       writeContainerStartData(containerId);
       Assert.fail();
@@ -195,7 +195,7 @@ public class TestMemoryApplicationHistoryStore extends
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
     for (int i = 1; i <= numContainers; ++i) {
-      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+      ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
       writeContainerStartData(containerId);
       writeContainerFinishData(containerId);
     }

+ 1 - 1
ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java

@@ -269,7 +269,7 @@ public class TestAHSWebServices extends JerseyTest {
     ApplicationId appId = ApplicationId.newInstance(0, 1);
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(appId, 1);
-    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     WebResource r = resource();
     ClientResponse response =
         r.path("ws").path("v1").path("applicationhistory").path("apps")

+ 86 - 51
ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java

@@ -59,6 +59,7 @@ public class CheckDatabaseHelper {
   private AmbariMetaInfo ambariMetaInfo;
   private Injector injector;
   private boolean errorAvailable = false;
+  private boolean warningAvailable = false;
 
   @Inject
   public CheckDatabaseHelper(DBAccessor dbAccessor,
@@ -119,11 +120,19 @@ public class CheckDatabaseHelper {
     this.errorAvailable = errorAvailable;
   }
 
+  public boolean isWarningAvailable() {
+    return warningAvailable;
+  }
+
+  public void setWarningAvailable(boolean warningAvailable) {
+    this.warningAvailable = warningAvailable;
+  }
+
   /*
-  * This method checks if all configurations that we have in clusterconfig table
-  * have at least one mapping in clusterconfigmapping table. If we found not mapped config
-  * then we are showing warning message for user.
-  * */
+   * This method checks that every configuration in the clusterconfig table
+   * has at least one mapping in the clusterconfigmapping table. If an unmapped
+   * config is found, a warning is shown to the user.
+   */
   protected void checkForNotMappedConfigsToCluster() {
     String GET_NOT_MAPPED_CONFIGS_QUERY = "select type_name from clusterconfig where type_name not in (select type_name from clusterconfigmapping)";
     Set<String> nonSelectedConfigs = new HashSet<>();
@@ -137,7 +146,8 @@ public class CheckDatabaseHelper {
         }
       }
       if (!nonSelectedConfigs.isEmpty()) {
-        LOG.warn("You have config(s) that is(are) not mapped to any cluster: " + StringUtils.join(nonSelectedConfigs, ","));
+        LOG.warn("You have config(s): {} that is(are) not mapped (in clusterconfigmapping table) to any cluster!", StringUtils.join(nonSelectedConfigs, ","));
+        warningAvailable = true;
       }
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for not mapped configs to cluster procedure: ", e);
@@ -159,25 +169,27 @@ public class CheckDatabaseHelper {
   * than one selected version it's a bug and we are showing error message for user.
   * */
   protected void checkForConfigsSelectedMoreThanOnce() {
-    String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name,type_name from clusterconfigmapping ccm " +
+    String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, ccm.type_name from clusterconfigmapping ccm " +
             "join clusters c on ccm.cluster_id=c.cluster_id " +
-            "group by c.cluster_name,type_name " +
+            "group by c.cluster_name, ccm.type_name " +
             "having sum(selected) > 1";
-    Multimap<String, String> configsSelectedMoreThanOnce = HashMultimap.create();
+    Multimap<String, String> clusterConfigTypeMap = HashMultimap.create();
     ResultSet rs = null;
     try {
       Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
       rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          configsSelectedMoreThanOnce.put(rs.getString("cluster_name"), rs.getString("type_name"));
+          clusterConfigTypeMap.put(rs.getString("cluster_name"), rs.getString("type_name"));
+        }
+
+        for (String clusterName : clusterConfigTypeMap.keySet()) {
+          LOG.error("You have config(s), in cluster {}, that is(are) selected more than once in clusterconfigmapping table: {}",
+                  clusterName, StringUtils.join(clusterConfigTypeMap.get(clusterName), ","));
+          errorAvailable = true;
         }
       }
-      for (String clusterName : configsSelectedMoreThanOnce.keySet()) {
-        LOG.error(String.format("You have config(s), in cluster %s, that is(are) selected more than once in clusterconfigmapping: %s",
-                clusterName ,StringUtils.join(configsSelectedMoreThanOnce.get(clusterName), ",")));
-        errorAvailable = true;
-      }
+
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for config selected more than ones procedure: ", e);
     } finally {
@@ -207,12 +219,13 @@ public class CheckDatabaseHelper {
         while (rs.next()) {
           hostsWithoutStatus.add(rs.getString("host_name"));
         }
-      }
 
-      if (!hostsWithoutStatus.isEmpty()) {
-        LOG.error("You have host(s) without status: " + StringUtils.join(hostsWithoutStatus, ","));
-        errorAvailable = true;
+        if (!hostsWithoutStatus.isEmpty()) {
+          LOG.error("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ","));
+          errorAvailable = true;
+        }
       }
+
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for host without state procedure: ", e);
     } finally {
@@ -236,7 +249,7 @@ public class CheckDatabaseHelper {
     String GET_HOST_COMPONENT_STATE_COUNT_QUERY = "select count(*) from hostcomponentstate";
     String GET_HOST_COMPONENT_DESIRED_STATE_COUNT_QUERY = "select count(*) from hostcomponentdesiredstate";
     String GET_MERGED_TABLE_ROW_COUNT_QUERY = "select count(*) FROM hostcomponentstate hcs " +
-            "JOIN hostcomponentdesiredstate hcds ON hcs.service_name = hcds.service_name AND hcs.component_name = hcds.component_name AND hcs.host_id = hcds.host_id";
+            "JOIN hostcomponentdesiredstate hcds ON hcs.service_name=hcds.service_name AND hcs.component_name=hcds.component_name AND hcs.host_id=hcds.host_id";
     int hostComponentStateCount = 0;
     int hostComponentDesiredStateCount = 0;
     int mergedCount = 0;
@@ -266,7 +279,7 @@ public class CheckDatabaseHelper {
       }
 
       if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) {
-        LOG.error("Your host component states(hostcomponentstate table) count not equals host component desired states(hostcomponentdesiredstate table) count!");
+        LOG.error("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!");
         errorAvailable = true;
       }
 
@@ -297,29 +310,31 @@ public class CheckDatabaseHelper {
     String GET_SERVICES_WITHOUT_CONFIGS_QUERY = "select c.cluster_name, service_name from clusterservices cs " +
             "join clusters c on cs.cluster_id=c.cluster_id " +
             "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)";
-    String GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY = "select service_name from serviceconfig where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null";
+    String GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY = "select c.cluster_name, sc.service_name, sc.version from serviceconfig sc " +
+            "join clusters c on sc.cluster_id=c.cluster_id " +
+            "where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null";
     String GET_STACK_NAME_VERSION_QUERY = "select c.cluster_name, s.stack_name, s.stack_version from clusters c " +
             "join stack s on c.desired_stack_id = s.stack_id";
-    String GET_SERVICES_WITH_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, type_name, sc.version from clusterservices cs " +
+    String GET_SERVICES_WITH_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name, sc.version from clusterservices cs " +
             "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
             "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " +
             "join clusters c on cc.cluster_id=c.cluster_id " +
             "where sc.group_id is null " +
-            "group by c.cluster_name, cs.service_name, type_name, sc.version";
-    String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select c.cluster_name, cs.service_name,cc.type_name from clusterservices cs " +
+            "group by c.cluster_name, cs.service_name, cc.type_name, sc.version";
+    String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name from clusterservices cs " +
             "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
             "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " +
             "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " +
             "join clusters c on ccm.cluster_id=c.cluster_id " +
             "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " +
-            "group by c.cluster_name,cs.service_name,cc.type_name " +
+            "group by c.cluster_name, cs.service_name, cc.type_name " +
             "having sum(ccm.selected) < 1";
-    Multimap<String, String> servicesWithoutConfigs = HashMultimap.create();
+    Multimap<String, String> clusterServiceMap = HashMultimap.create();
     Map<String, Map<String, String>>  clusterStackInfo = new HashMap<>();
-    Set<String> servicesWithoutMappedConfigs = new HashSet<>();
-    Map<String, Multimap<String, String>> notSelectedServiceConfigs = new HashMap<>();
+    Map<String, Multimap<String, String>> clusterServiceVersionMap = new HashMap<>();
+    Map<String, Multimap<String, String>> clusterServiceConfigType = new HashMap<>();
     ResultSet rs = null;
 
     try {
@@ -328,27 +343,45 @@ public class CheckDatabaseHelper {
       rs = statement.executeQuery(GET_SERVICES_WITHOUT_CONFIGS_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          servicesWithoutConfigs.put(rs.getString("cluster_name"), rs.getString("service_name"));
+          clusterServiceMap.put(rs.getString("cluster_name"), rs.getString("service_name"));
+        }
+
+        for (String clusterName : clusterServiceMap.keySet()) {
+          LOG.error("Service(s): {}, from cluster {} has no config(s) in serviceconfig table!", StringUtils.join(clusterServiceMap.get(clusterName), ","), clusterName);
+          errorAvailable = true;
         }
-      }
 
-      for (String clusterName : servicesWithoutConfigs.keySet()) {
-        LOG.error(String.format("Service(s): %s, from cluster %s has no config(s) in serviceconfig table!", StringUtils.join(servicesWithoutConfigs.get(clusterName), ","), clusterName));
-        errorAvailable = true;
       }
 
       rs = statement.executeQuery(GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY);
       if (rs != null) {
+        String serviceName = null, version = null, clusterName = null;
         while (rs.next()) {
-          servicesWithoutMappedConfigs.add(rs.getString("service_name"));
+          serviceName = rs.getString("service_name");
+          clusterName = rs.getString("cluster_name");
+          version = rs.getString("version");
+
+          if (clusterServiceVersionMap.get(clusterName) != null) {
+            Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clusterName);
+            serviceVersion.put(serviceName, version);
+          } else {
+            Multimap<String, String> serviceVersion = HashMultimap.create();
+            serviceVersion.put(serviceName, version);
+            clusterServiceVersionMap.put(clusterName, serviceVersion);
+          }
+        }
+
+        for (String clName : clusterServiceVersionMap.keySet()) {
+          Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clName);
+          for (String servName : serviceVersion.keySet()) {
+            LOG.error("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ","));
+            errorAvailable = true;
+          }
         }
-      }
 
-      if (!servicesWithoutMappedConfigs.isEmpty()) {
-        LOG.error("You have service(s) without mapped configs in serviceconfigmapping: " + StringUtils.join(servicesWithoutMappedConfigs, ","));
-        errorAvailable = true;
       }
 
+      //get stack info from db
       rs = statement.executeQuery(GET_STACK_NAME_VERSION_QUERY);
       if (rs != null) {
         while (rs.next()) {
@@ -375,6 +408,7 @@ public class CheckDatabaseHelper {
 
           serviceNames.add(serviceName);
 
+          //collect data about mapped configs to services from db
           if (dbClusterServiceVersionConfigs.get(clusterName) != null) {
             Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName);
 
@@ -386,18 +420,18 @@ public class CheckDatabaseHelper {
               dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs);
             }
           } else {
-
             Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = new HashMap<>();
             Multimap<String, String> dbServiceConfigs = HashMultimap.create();
             dbServiceConfigs.put(serviceName, configType);
             dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs);
             dbClusterServiceVersionConfigs.put(clusterName, dbServiceVersionConfigs);
-
           }
         }
       }
 
+      //compare service configs from stack with configs that we got from db
       for (Map.Entry<String, Map<String, String>> clusterStackInfoEntry : clusterStackInfo.entrySet()) {
+        //collect required configs for all services from stack
         String clusterName = clusterStackInfoEntry.getKey();
         Map<String, String> stackInfo = clusterStackInfoEntry.getValue();
         String stackName = stackInfo.keySet().iterator().next();
@@ -411,6 +445,7 @@ public class CheckDatabaseHelper {
           }
         }
 
+        //compare required service configs from stack with mapped service configs from db
         Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName);
         for (Integer serviceVersion : dbServiceVersionConfigs.keySet()) {
           Multimap<String, String> dbServiceConfigs = dbServiceVersionConfigs.get(serviceVersion);
@@ -420,8 +455,8 @@ public class CheckDatabaseHelper {
             if (serviceConfigsFromDB != null && serviceConfigsFromStack != null) {
               serviceConfigsFromStack.removeAll(serviceConfigsFromDB);
               if (!serviceConfigsFromStack.isEmpty()) {
-                LOG.error(String.format("Required config(s): %s is(are) not available for service %s with service config version %s for cluster %s",
-                        StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName));
+                LOG.error("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}",
+                        StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName);
                 errorAvailable = true;
               }
             }
@@ -429,7 +464,7 @@ public class CheckDatabaseHelper {
         }
       }
 
-
+      // get services which have mapped configs that are not selected in clusterconfigmapping
       rs = statement.executeQuery(GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY);
       if (rs != null) {
         String serviceName = null, configType = null, clusterName = null;
@@ -439,24 +474,24 @@ public class CheckDatabaseHelper {
           configType = rs.getString("type_name");
 
 
-          if (notSelectedServiceConfigs.get(clusterName) != null) {
-            Multimap<String, String> serviceConfigs = notSelectedServiceConfigs.get(clusterName);
+          if (clusterServiceConfigType.get(clusterName) != null) {
+            Multimap<String, String> serviceConfigs = clusterServiceConfigType.get(clusterName);
             serviceConfigs.put(serviceName, configType);
           } else {
 
             Multimap<String, String> serviceConfigs = HashMultimap.create();
             serviceConfigs.put(serviceName, configType);
-            notSelectedServiceConfigs.put(clusterName, serviceConfigs);
+            clusterServiceConfigType.put(clusterName, serviceConfigs);
 
           }
 
         }
       }
 
-      for (String clusterName : notSelectedServiceConfigs.keySet()) {
-        Multimap<String, String> serviceConfig = notSelectedServiceConfigs.get(clusterName);
+      for (String clusterName : clusterServiceConfigType.keySet()) {
+        Multimap<String, String> serviceConfig = clusterServiceConfigType.get(clusterName);
         for (String serviceName : serviceConfig.keySet()) {
-          LOG.error(String.format("You have non selected configs: %s for service %s from cluster %s!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName));
+          LOG.error("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName);
           errorAvailable = true;
         }
       }
@@ -515,8 +550,8 @@ public class CheckDatabaseHelper {
     } finally {
       if (checkDatabaseHelper != null) {
         checkDatabaseHelper.closeConnection();
-        if (checkDatabaseHelper.isErrorAvailable()) {
-          System.out.print("Some error(s) was(were) found. Please check ambari-server-check-database.log for problem(s).");
+        if (checkDatabaseHelper.isErrorAvailable() || checkDatabaseHelper.isWarningAvailable()) {
+          System.out.print("Some error(s) or/and warning(s) was(were) found. Please check ambari-server-check-database.log for problem(s).");
         } else {
           System.out.print("No erros were found.");
         }
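
Several hunks above replace String.format(...) inside LOG calls with parameterized messages; the {} placeholders are SLF4J syntax (assuming SLF4J is the logging facade here, as the multi-argument calls suggest). A minimal sketch of the pattern and why it is preferred:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      public static void main(String[] args) {
        String clusterName = "c1"; // illustrative values
        String configTypes = "core-site,hdfs-site";
        // {} placeholders are substituted only when the level is enabled,
        // so no formatting cost is paid for suppressed log levels.
        LOG.error("You have config(s), in cluster {}, that is(are) selected more than once: {}",
            clusterName, configTypes);
      }
    }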

+ 89 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -184,6 +184,9 @@ public class Configuration {
   public static final String LDAP_REFERRAL_KEY = "authentication.ldap.referral";
   public static final String LDAP_PAGINATION_ENABLED_KEY = "authentication.ldap.pagination.enabled";
   public static final String SERVER_EC_CACHE_SIZE = "server.ecCacheSize";
+  public static final String SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED = "server.hrcStatusSummary.cache.enabled";
+  public static final String SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE = "server.hrcStatusSummary.cache.size";
+  public static final String SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION = "server.hrcStatusSummary.cache.expiryDuration";
   public static final String SERVER_STALE_CONFIG_CACHE_ENABLED_KEY = "server.cache.isStale.enabled";
   public static final String SERVER_PERSISTENCE_TYPE_KEY = "server.persistence.type";
   public static final String SERVER_JDBC_USER_NAME_KEY = "server.jdbc.user.name";
@@ -278,6 +281,9 @@ public class Configuration {
   public static final String TEMPORARY_KEYSTORE_ACTIVELY_PURGE = "security.temporary.keystore.actibely.purge";
   public static final boolean TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT = true;
 
+  // Alerts notifications properties
+  public static final String AMBARI_DISPLAY_URL = "ambari.display.url";
+
   /**
    * Key for repo validation suffixes.
    */
@@ -364,6 +370,11 @@ public class Configuration {
 
   public static final String CUSTOM_ACTION_DEFINITION_KEY = "custom.action.definitions";
   public static final String SHARED_RESOURCES_DIR_KEY = "shared.resources.dir";
+
+  protected static final boolean SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT = true;
+  protected static final long SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT = 10000L;
+  protected static final long SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT = 30; //minutes
+
   private static final String CUSTOM_ACTION_DEFINITION_DEF_VALUE = "/var/lib/ambari-server/resources/custom_action_definitions";
 
   private static final long SERVER_EC_CACHE_SIZE_DEFAULT = 10000L;
@@ -1773,6 +1784,75 @@ public class Configuration {
     return value;
   }
 
+  /**
+   * Caching of host role command status summary can be enabled/disabled
+   * through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED} config property.
+   * This method returns the value of {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED}
+   * config property. If this config property is not defined then the default defined by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT} is returned.
+   * @return true if caching is to be enabled, otherwise false.
+   */
+  public boolean getHostRoleCommandStatusSummaryCacheEnabled() {
+    String stringValue = properties.getProperty(SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED);
+    boolean value = SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT;
+    if (stringValue != null) {
+      try {
+        value = Boolean.valueOf(stringValue);
+      }
+      catch (NumberFormatException ignored) {
+      }
+
+    }
+
+    return value;
+  }
+
+  /**
+   * To keep the cache storing host role command status summary objects from exhausting
+   * memory, we set a maximum number of records allowed in the cache. This limit can be configured
+   * through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE} config property. The method returns
+   * the value of this config property. If this config property is not defined then
+   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT} is returned.
+   * @return the upper limit for the number of cached host role command summaries.
+   */
+  public long getHostRoleCommandStatusSummaryCacheSize() {
+    String stringValue = properties.getProperty(SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE);
+    long value = SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT;
+    if (stringValue != null) {
+      try {
+        value = Long.valueOf(stringValue);
+      }
+      catch (NumberFormatException ignored) {
+      }
+
+    }
+
+    return value;
+  }
+
+  /**
+   * As a safety measure the cache storing host role command status summaries should auto expire after a while.
+   * The expiry duration is specified through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION} config property
+   * expressed in minutes. The method returns the value of this config property. If this config property is not defined then
+   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT} is returned.
+   * @return the cache expiry duration in minutes
+   */
+  public long getHostRoleCommandStatusSummaryCacheExpiryDuration() {
+    String stringValue = properties.getProperty(SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION);
+    long value = SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT;
+    if (stringValue != null) {
+      try {
+        value = Long.valueOf(stringValue);
+      }
+      catch (NumberFormatException ignored) {
+      }
+
+    }
+
+    return value;
+  }
+
   /**
    * @return whether staleConfig's flag is cached.
    */
@@ -2500,6 +2580,15 @@ public class Configuration {
     return Integer.parseInt(properties.getProperty(ALERTS_CACHE_SIZE, ALERTS_CACHE_SIZE_DEFAULT));
   }
 
+  /**
+   * Get the Ambari display URL.
+   * @return the value of the ambari.display.url property, or null if it is not set
+   */
+  public String getAmbariDisplayUrl() {
+    return properties.getProperty(AMBARI_DISPLAY_URL, null);
+  }
+
   /**
    * @return number of retry attempts for api and blueprint operations
    */
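
The three new getters above share one pattern: read the raw property, attempt a parse, and fall back to a compile-time default when the value is absent or malformed. A condensed, self-contained sketch of that pattern (the key and default mirror the diff; the class is illustrative):

    import java.util.Properties;

    public class DefaultedPropertySketch {
      static final String CACHE_SIZE_KEY = "server.hrcStatusSummary.cache.size";
      static final long CACHE_SIZE_DEFAULT = 10000L;

      // Parse the property if present; fall back to the default on bad input.
      static long getCacheSize(Properties properties) {
        String stringValue = properties.getProperty(CACHE_SIZE_KEY);
        if (stringValue != null) {
          try {
            return Long.valueOf(stringValue);
          } catch (NumberFormatException ignored) {
            // fall through to the default
          }
        }
        return CACHE_SIZE_DEFAULT;
      }

      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(CACHE_SIZE_KEY, "25000");
        System.out.println(getCacheSize(props)); // 25000
        props.setProperty(CACHE_SIZE_KEY, "not-a-number");
        System.out.println(getCacheSize(props)); // 10000 (the default)
      }
    }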

+ 16 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java

@@ -65,6 +65,7 @@ import org.apache.ambari.server.notifications.NotificationDispatcher;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessorImpl;
 import org.apache.ambari.server.orm.PersistenceType;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.scheduler.ExecutionScheduler;
 import org.apache.ambari.server.scheduler.ExecutionSchedulerImpl;
 import org.apache.ambari.server.security.AmbariEntryPoint;
@@ -338,6 +339,21 @@ public class ControllerModule extends AbstractModule {
     bindConstant().annotatedWith(Names.named("executionCommandCacheSize")).
         to(configuration.getExecutionCommandsCacheSize());
 
+
+    // Host role commands status summary max cache enable/disable
+    bindConstant().annotatedWith(Names.named(HostRoleCommandDAO.HRC_STATUS_SUMMARY_CACHE_ENABLED)).
+      to(configuration.getHostRoleCommandStatusSummaryCacheEnabled());
+
+    // Host role commands status summary max cache size
+    bindConstant().annotatedWith(Names.named(HostRoleCommandDAO.HRC_STATUS_SUMMARY_CACHE_SIZE)).
+      to(configuration.getHostRoleCommandStatusSummaryCacheSize());
+    // Host role command status summary cache expiry duration in minutes
+    bindConstant().annotatedWith(Names.named(HostRoleCommandDAO.HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_MINUTES)).
+      to(configuration.getHostRoleCommandStatusSummaryCacheExpiryDuration());
+
     bind(AmbariManagementController.class).to(
       AmbariManagementControllerImpl.class);
     bind(AbstractRootServiceResponseFactory.class).to(RootServiceResponseFactory.class);
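
The bindConstant() calls above expose the new Configuration getters as Guice @Named constants that HostRoleCommandDAO can inject. A minimal sketch of that mechanism, assuming Guice on the classpath; the "hrcStatusSummaryCacheSize" name below is a placeholder, not the actual HostRoleCommandDAO constant:

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import com.google.inject.Injector;
    import com.google.inject.name.Named;
    import com.google.inject.name.Names;

    public class NamedConstantSketch {
      static class CacheConsumer {
        final long cacheSize;

        @Inject
        CacheConsumer(@Named("hrcStatusSummaryCacheSize") long cacheSize) {
          this.cacheSize = cacheSize;
        }
      }

      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new AbstractModule() {
          @Override
          protected void configure() {
            // bindConstant() pairs a literal value with a @Named key,
            // mirroring the ControllerModule bindings above.
            bindConstant()
                .annotatedWith(Names.named("hrcStatusSummaryCacheSize"))
                .to(10000L);
          }
        });
        System.out.println(injector.getInstance(CacheConsumer.class).cacheSize);
      }
    }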

+ 28 - 7
ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java

@@ -31,21 +31,28 @@ public class ServiceComponentRequest {
 
   private String componentCategory;
 
+  private String recoveryEnabled; // CREATE/UPDATE
+
   public ServiceComponentRequest(String clusterName, String serviceName,
                                  String componentName, String desiredState) {
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.componentName = componentName;
-    this.desiredState = desiredState;
+    this(clusterName, serviceName, componentName, desiredState, null, null);
+  }
+
+  public ServiceComponentRequest(String clusterName, String serviceName,
+                                 String componentName, String desiredState,
+                                 String recoveryEnabled) {
+    this(clusterName, serviceName, componentName, desiredState, recoveryEnabled, null);
   }
 
   public ServiceComponentRequest(String clusterName,
                                  String serviceName, String componentName,
-                                 String desiredState, String componentCategory) {
+                                 String desiredState, String recoveryEnabled,
+                                 String componentCategory) {
     this.clusterName = clusterName;
     this.serviceName = serviceName;
     this.componentName = componentName;
     this.desiredState = desiredState;
+    this.recoveryEnabled = recoveryEnabled;
     this.componentCategory = componentCategory;
   }
 
@@ -105,6 +112,20 @@ public class ServiceComponentRequest {
     this.clusterName = clusterName;
   }
 
+  /**
+   * @return recoveryEnabled
+   */
+  public String getRecoveryEnabled() {
+    return recoveryEnabled;
+  }
+
+  /**
+   * @param recoveryEnabled the recoveryEnabled value to set.
+   */
+  public void setRecoveryEnabled(String recoveryEnabled) {
+    this.recoveryEnabled = recoveryEnabled;
+  }
+
   public String getComponentCategory() {
     return componentCategory;
   }
@@ -115,7 +136,7 @@ public class ServiceComponentRequest {
 
   @Override
   public String toString() {
-    return String.format("[clusterName=%s, serviceName=%s, componentName=%s, desiredState=%s, componentCategory=%s]",
-        clusterName, serviceName, clusterName, desiredState, componentCategory);
+    return String.format("[clusterName=%s, serviceName=%s, componentName=%s, desiredState=%s, recoveryEnabled=%s, componentCategory=%s]",
+        clusterName, serviceName, componentName, desiredState, recoveryEnabled, componentCategory);
   }
 }

+ 21 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java

@@ -41,6 +41,8 @@ public class ServiceComponentResponse {
 
   private int installedCount;
 
+  private boolean recoveryEnabled;
+
   public ServiceComponentResponse(Long clusterId, String clusterName,
                                   String serviceName,
                                   String componentName,
@@ -48,7 +50,8 @@ public class ServiceComponentResponse {
                                   String desiredState,
                                   int totalCount,
                                   int startedCount,
-                                  int installedCount) {
+                                  int installedCount,
+                                  boolean recoveryEnabled) {
     super();
     this.clusterId = clusterId;
     this.clusterName = clusterName;
@@ -59,6 +62,7 @@ public class ServiceComponentResponse {
     this.totalCount = totalCount;
     this.startedCount = startedCount;
     this.installedCount = installedCount;
+    this.recoveryEnabled = recoveryEnabled;
   }
 
   /**
@@ -211,6 +215,22 @@ public class ServiceComponentResponse {
     this.totalCount = totalCount;
   }
 
+  /**
+   * Get a true or false value indicating if the service component is auto start enabled
+   * @return true or false
+   */
+  public boolean isRecoveryEnabled() {
+    return recoveryEnabled;
+  }
+
+  /**
+   * Set a true or false value indicating whether the service component is auto start enabled
+   * @param recoveryEnabled true to enable auto start for the component, false to disable it
+   */
+  public void setRecoveryEnabled(boolean recoveryEnabled) {
+    this.recoveryEnabled = recoveryEnabled;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;

+ 38 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java

@@ -84,6 +84,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
   protected static final String COMPONENT_TOTAL_COUNT_PROPERTY_ID     = "ServiceComponentInfo/total_count";
   protected static final String COMPONENT_STARTED_COUNT_PROPERTY_ID   = "ServiceComponentInfo/started_count";
   protected static final String COMPONENT_INSTALLED_COUNT_PROPERTY_ID = "ServiceComponentInfo/installed_count";
+  protected static final String COMPONENT_RECOVERY_ENABLED_ID         = "ServiceComponentInfo/recovery_enabled";
 
   private static final String TRUE = "true";
 
@@ -178,6 +179,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       setResourceProperty(resource, COMPONENT_TOTAL_COUNT_PROPERTY_ID, response.getTotalCount(), requestedIds);
       setResourceProperty(resource, COMPONENT_STARTED_COUNT_PROPERTY_ID, response.getStartedCount(), requestedIds);
       setResourceProperty(resource, COMPONENT_INSTALLED_COUNT_PROPERTY_ID, response.getInstalledCount(), requestedIds);
+      setResourceProperty(resource, COMPONENT_RECOVERY_ENABLED_ID, String.valueOf(response.isRecoveryEnabled()), requestedIds);
 
       resources.add(resource);
     }
@@ -251,6 +253,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
         (String) properties.get(COMPONENT_SERVICE_NAME_PROPERTY_ID),
         (String) properties.get(COMPONENT_COMPONENT_NAME_PROPERTY_ID),
         (String) properties.get(COMPONENT_STATE_PROPERTY_ID),
+        (String) properties.get(COMPONENT_RECOVERY_ENABLED_ID),
         (String) properties.get(COMPONENT_CATEGORY_PROPERTY_ID));
   }
 
@@ -463,6 +466,9 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
     Map<String, Map<String, Set<String>>> componentNames = new HashMap<>();
     Set<State> seenNewStates = new HashSet<>();
 
+    Collection<ServiceComponent> recoveryEnabledComponents = new ArrayList<>();
+    Collection<ServiceComponent> recoveryDisabledComponents = new ArrayList<>();
+
     // Determine operation level
     Resource.Type reqOpLvl;
     if (requestProperties.containsKey(RequestOperationLevel.OPERATION_LEVEL_ID)) {
@@ -513,6 +519,20 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
         continue;
       }
 
+      // Gather the components affected by the change in
+      // auto start state
+      if (!StringUtils.isEmpty(request.getRecoveryEnabled())) {
+        boolean newRecoveryEnabled = Boolean.parseBoolean(request.getRecoveryEnabled());
+        boolean oldRecoveryEnabled = sc.isRecoveryEnabled();
+        if (newRecoveryEnabled != oldRecoveryEnabled) {
+          if (newRecoveryEnabled) {
+            recoveryEnabledComponents.add(sc);
+          } else {
+            recoveryDisabledComponents.add(sc);
+          }
+        }
+      }
+
       if (newState == null) {
         debug("Nothing to do for new updateServiceComponent request, request ={}, newDesiredState=null" + request);
         continue;
@@ -539,9 +559,11 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
               + ", clusterId=" + cluster.getClusterId()
               + ", serviceName=" + sc.getServiceName()
               + ", componentName=" + sc.getName()
+              + ", recoveryEnabled=" + sc.isRecoveryEnabled()
               + ", currentDesiredState=" + oldScState
               + ", newDesiredState=" + newState);
         }
+
         if (!changedComps.containsKey(newState)) {
           changedComps.put(newState, new ArrayList<ServiceComponent>());
         }
@@ -549,6 +571,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
               + ", clusterName=" + clusterName
               + ", serviceName=" + serviceName
               + ", componentName=" + sc.getName()
+              + ", recoveryEnabled=" + sc.isRecoveryEnabled()
               + ", currentDesiredState=" + oldScState
               + ", newDesiredState=" + newState);
 
@@ -562,6 +585,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
                 + ", clusterName=" + clusterName
                 + ", serviceName=" + serviceName
                 + ", componentName=" + sc.getName()
+                + ", recoveryEnabled=" + sc.isRecoveryEnabled()
                 + ", hostname=" + sch.getHostName()
                 + ", currentState=" + oldSchState
                 + ", newDesiredState=" + newState);
@@ -574,6 +598,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
                 + ", clusterName=" + clusterName
                 + ", serviceName=" + serviceName
                 + ", componentName=" + sc.getName()
+                + ", recoveryEnabled=" + sc.isRecoveryEnabled()
                 + ", hostname=" + sch.getHostName()
                 + ", currentState=" + oldSchState
                 + ", newDesiredState=" + newState);
@@ -587,6 +612,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
                 + ", clusterName=" + clusterName
                 + ", serviceName=" + serviceName
                 + ", componentName=" + sc.getName()
+                + ", recoveryEnabled=" + sc.isRecoveryEnabled()
                 + ", hostname=" + sch.getHostName());
 
           continue;
@@ -600,6 +626,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
               + ", clusterId=" + cluster.getClusterId()
               + ", serviceName=" + sch.getServiceName()
               + ", componentName=" + sch.getServiceComponentName()
+              + ", recoveryEnabled=" + sc.isRecoveryEnabled()
               + ", hostname=" + sch.getHostName()
               + ", currentState=" + oldSchState
               + ", newDesiredState=" + newState);
@@ -615,6 +642,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
               + ", clusterName=" + clusterName
               + ", serviceName=" + serviceName
               + ", componentName=" + sc.getName()
+              + ", recoveryEnabled=" + sc.isRecoveryEnabled()
               + ", hostname=" + sch.getHostName()
               + ", currentState=" + oldSchState
               + ", newDesiredState=" + newState);
@@ -628,6 +656,16 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
 
     // TODO additional validation?
 
+    // Validations completed. Update the affected service components now.
+
+    for (ServiceComponent sc : recoveryEnabledComponents) {
+      sc.setRecoveryEnabled(true);
+    }
+
+    for (ServiceComponent sc : recoveryDisabledComponents) {
+      sc.setRecoveryEnabled(false);
+    }
+
     Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
 
     return getManagementController().createAndPersistStages(cluster, requestProperties, null, null, changedComps, changedScHosts,

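For illustration only (not part of the patch), the new property travels through the provider as an entry in the request properties map. A hedged Java sketch with hypothetical cluster/service/component values; only the recovery_enabled id is introduced by this change:

    // Hedged sketch: the properties map for an update request that enables
    // auto start on a component. Values are hypothetical.
    Map<String, Object> properties = new HashMap<>();
    properties.put("ServiceComponentInfo/cluster_name", "c1");
    properties.put("ServiceComponentInfo/service_name", "HDFS");
    properties.put("ServiceComponentInfo/component_name", "DATANODE");
    properties.put("ServiceComponentInfo/recovery_enabled", "true"); // COMPONENT_RECOVERY_ENABLED_ID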
+ 136 - 30
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java

@@ -18,9 +18,6 @@
 
 package org.apache.ambari.server.orm.dao;
 
-import static org.apache.ambari.server.orm.DBAccessor.DbType.ORACLE;
-import static org.apache.ambari.server.orm.dao.DaoUtils.ORACLE_LIST_LIMIT;
-
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -28,6 +25,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import javax.persistence.EntityManager;
 import javax.persistence.TypedQuery;
@@ -49,16 +47,27 @@ import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity_;
 import org.apache.ambari.server.orm.entities.StageEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 import com.google.common.collect.Lists;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
+import com.google.inject.name.Named;
 import com.google.inject.persist.Transactional;
 
+import static org.apache.ambari.server.orm.DBAccessor.DbType.ORACLE;
+import static org.apache.ambari.server.orm.dao.DaoUtils.ORACLE_LIST_LIMIT;
+
 @Singleton
 public class HostRoleCommandDAO {
 
+  private static final Logger LOG = LoggerFactory.getLogger(HostRoleCommandDAO.class);
+
   private static final String SUMMARY_DTO = String.format(
     "SELECT NEW %s(" +
       "MAX(hrc.stage.skippable), " +
@@ -92,12 +101,122 @@ public class HostRoleCommandDAO {
    */
   private static final String COMPLETED_REQUESTS_SQL = "SELECT DISTINCT task.requestId FROM HostRoleCommandEntity task WHERE task.requestId NOT IN (SELECT task.requestId FROM HostRoleCommandEntity task WHERE task.status IN :notCompletedStatuses) ORDER BY task.requestId {0}";
 
+  /**
+   * A cache that holds {@link HostRoleCommandStatusSummaryDTO} objects, grouped by stage id, keyed by request id.
+   * The JPQL that computes the host role command status summary for a request is rather expensive,
+   * so this cache helps reduce the load on the database.
+   */
+  private final LoadingCache<Long, Map<Long, HostRoleCommandStatusSummaryDTO>> hrcStatusSummaryCache;
+
+  /**
+   * Specifies whether caching of {@link HostRoleCommandStatusSummaryDTO} summaries,
+   * grouped by stage id per request, is enabled.
+   */
+  private final boolean hostRoleCommandStatusSummaryCacheEnabled;
+
   @Inject
   Provider<EntityManager> entityManagerProvider;
 
   @Inject
   DaoUtils daoUtils;
 
+  public static final String HRC_STATUS_SUMMARY_CACHE_SIZE = "hostRoleCommandStatusSummaryCacheSize";
+  public static final String HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_MINUTES = "hostRoleCommandStatusCacheExpiryDurationMins";
+  public static final String HRC_STATUS_SUMMARY_CACHE_ENABLED = "hostRoleCommandStatusSummaryCacheEnabled";
+
+  /**
+   * Invalidates the host role command status summary cache entry that corresponds to the given request.
+   * @param requestId the key of the cache entry to be invalidated.
+   */
+  protected void invalidateHostRoleCommandStatusSummaryCache(Long requestId) {
+    if (!hostRoleCommandStatusSummaryCacheEnabled) {
+      return;
+    }
+
+    LOG.debug("Invalidating host role command status summary cache for request {}!", requestId);
+    hrcStatusSummaryCache.invalidate(requestId);
+  }
+
+  /**
+   * Invalidates those entries in the host role command status cache that depend on the passed
+   * {@link org.apache.ambari.server.orm.entities.HostRoleCommandEntity} entity.
+   * @param hostRoleCommandEntity the entity whose request's cache entry is to be invalidated
+   */
+  protected void invalidateHostRoleCommandStatusCache(HostRoleCommandEntity hostRoleCommandEntity) {
+    if (!hostRoleCommandStatusSummaryCacheEnabled) {
+      return;
+    }
+
+    if (hostRoleCommandEntity != null) {
+      Long requestId = hostRoleCommandEntity.getRequestId();
+      if (requestId == null) {
+        StageEntity stageEntity = hostRoleCommandEntity.getStage();
+        if (stageEntity != null) {
+          requestId = stageEntity.getRequestId();
+        }
+      }
+
+      if (requestId != null) {
+        invalidateHostRoleCommandStatusSummaryCache(requestId.longValue());
+      }
+    }
+  }
+
+  /**
+   * Loads the counts of tasks for a request and groups them by stage id.
+   * This allows for very efficient loading when there are a huge number of stages
+   * and tasks to iterate (for example, during a Stack Upgrade).
+   * @param requestId the request id
+   * @return the map of stage-to-summary objects
+   */
+  @RequiresSession
+  protected Map<Long, HostRoleCommandStatusSummaryDTO> loadAggregateCounts(Long requestId) {
+
+    TypedQuery<HostRoleCommandStatusSummaryDTO> query = entityManagerProvider.get().createQuery(
+      SUMMARY_DTO, HostRoleCommandStatusSummaryDTO.class);
+
+    query.setParameter("requestId", requestId);
+    query.setParameter("aborted", HostRoleStatus.ABORTED);
+    query.setParameter("completed", HostRoleStatus.COMPLETED);
+    query.setParameter("failed", HostRoleStatus.FAILED);
+    query.setParameter("holding", HostRoleStatus.HOLDING);
+    query.setParameter("holding_failed", HostRoleStatus.HOLDING_FAILED);
+    query.setParameter("holding_timedout", HostRoleStatus.HOLDING_TIMEDOUT);
+    query.setParameter("in_progress", HostRoleStatus.IN_PROGRESS);
+    query.setParameter("pending", HostRoleStatus.PENDING);
+    query.setParameter("queued", HostRoleStatus.QUEUED);
+    query.setParameter("timedout", HostRoleStatus.TIMEDOUT);
+    query.setParameter("skipped_failed", HostRoleStatus.SKIPPED_FAILED);
+
+    Map<Long, HostRoleCommandStatusSummaryDTO> map = new HashMap<Long, HostRoleCommandStatusSummaryDTO>();
+
+    for (HostRoleCommandStatusSummaryDTO dto : daoUtils.selectList(query)) {
+      map.put(dto.getStageId(), dto);
+    }
+
+    return map;
+  }
+
+  @Inject
+  public HostRoleCommandDAO(
+      @Named(HRC_STATUS_SUMMARY_CACHE_ENABLED) boolean hostRoleCommandStatusSummaryCacheEnabled,
+      @Named(HRC_STATUS_SUMMARY_CACHE_SIZE) long hostRoleCommandStatusSummaryCacheLimit,
+      @Named(HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_MINUTES) long hostRoleCommandStatusSummaryCacheExpiryDurationMins) {
+    this.hostRoleCommandStatusSummaryCacheEnabled = hostRoleCommandStatusSummaryCacheEnabled;
+
+    LOG.info("Host role command status summary cache {} !", hostRoleCommandStatusSummaryCacheEnabled ? "enabled" : "disabled");
+
+
+    hrcStatusSummaryCache = CacheBuilder.newBuilder()
+      .maximumSize(hostRoleCommandStatusSummaryCacheLimit)
+      .expireAfterAccess(hostRoleCommandStatusSummaryCacheExpiryDurationMins, TimeUnit.MINUTES)
+      .build(new CacheLoader<Long, Map<Long, HostRoleCommandStatusSummaryDTO>>() {
+        @Override
+        public Map<Long, HostRoleCommandStatusSummaryDTO> load(Long requestId) throws Exception {
+          LOG.debug("Cache miss for host role command status summary object for request {}, fetching from JPA", requestId);
+          Map<Long, HostRoleCommandStatusSummaryDTO> hrcCommandStatusByStageId = loadAggregateCounts(requestId);
+
+          return hrcCommandStatusByStageId;
+        }
+      });
+  }
+
   @RequiresSession
   public HostRoleCommandEntity findByPK(long taskId) {
     return entityManagerProvider.get().find(HostRoleCommandEntity.class, taskId);
@@ -425,11 +544,16 @@ public class HostRoleCommandDAO {
   @Transactional
   public void create(HostRoleCommandEntity stageEntity) {
     entityManagerProvider.get().persist(stageEntity);
+
+    invalidateHostRoleCommandStatusCache(stageEntity);
   }
 
   @Transactional
   public HostRoleCommandEntity merge(HostRoleCommandEntity stageEntity) {
     HostRoleCommandEntity entity = entityManagerProvider.get().merge(stageEntity);
+
+    invalidateHostRoleCommandStatusCache(entity);
+
     return entity;
   }
 
@@ -446,6 +570,8 @@ public class HostRoleCommandDAO {
     List<HostRoleCommandEntity> managedList = new ArrayList<HostRoleCommandEntity>(entities.size());
     for (HostRoleCommandEntity entity : entities) {
       managedList.add(entityManagerProvider.get().merge(entity));
+
+      invalidateHostRoleCommandStatusCache(entity);
     }
     return managedList;
   }
@@ -453,6 +579,8 @@ public class HostRoleCommandDAO {
   @Transactional
   public void remove(HostRoleCommandEntity stageEntity) {
     entityManagerProvider.get().remove(merge(stageEntity));
+
+    invalidateHostRoleCommandStatusCache(stageEntity);
   }
 
   @Transactional
@@ -463,39 +591,17 @@ public class HostRoleCommandDAO {
 
   /**
    * Finds the counts of tasks for a request and groups them by stage id.
-   * This allows for very efficient loading when there are a huge number of stages
-   * and tasks to iterate (for example, during a Stack Upgrade).
    * @param requestId the request id
    * @return the map of stage-to-summary objects
    */
-  @RequiresSession
   public Map<Long, HostRoleCommandStatusSummaryDTO> findAggregateCounts(Long requestId) {
-
-    TypedQuery<HostRoleCommandStatusSummaryDTO> query = entityManagerProvider.get().createQuery(
-        SUMMARY_DTO, HostRoleCommandStatusSummaryDTO.class);
-
-    query.setParameter("requestId", requestId);
-    query.setParameter("aborted", HostRoleStatus.ABORTED);
-    query.setParameter("completed", HostRoleStatus.COMPLETED);
-    query.setParameter("failed", HostRoleStatus.FAILED);
-    query.setParameter("holding", HostRoleStatus.HOLDING);
-    query.setParameter("holding_failed", HostRoleStatus.HOLDING_FAILED);
-    query.setParameter("holding_timedout", HostRoleStatus.HOLDING_TIMEDOUT);
-    query.setParameter("in_progress", HostRoleStatus.IN_PROGRESS);
-    query.setParameter("pending", HostRoleStatus.PENDING);
-    query.setParameter("queued", HostRoleStatus.QUEUED);
-    query.setParameter("timedout", HostRoleStatus.TIMEDOUT);
-    query.setParameter("skipped_failed", HostRoleStatus.SKIPPED_FAILED);
-
-    Map<Long, HostRoleCommandStatusSummaryDTO> map = new HashMap<Long, HostRoleCommandStatusSummaryDTO>();
-
-    for (HostRoleCommandStatusSummaryDTO dto : daoUtils.selectList(query)) {
-      map.put(dto.getStageId(), dto);
-    }
-
-    return map;
+    if (hostRoleCommandStatusSummaryCacheEnabled) {
+      return hrcStatusSummaryCache.getUnchecked(requestId);
+    }
+
+    // caching is not enabled; fall back to fetching through JPA
+    return loadAggregateCounts(requestId);
   }
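A hedged, self-contained sketch of the get-or-load plus invalidate pattern the DAO builds on: getUnchecked serves hits from memory and runs the CacheLoader on misses, while the invalidation hooks in create/merge/remove force the next read to recompute. A String payload stands in for the per-stage summary map; the size and expiry values are assumptions.

    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    class SummaryCacheSketch {
      // Loading cache keyed by request id, mirroring hrcStatusSummaryCache above.
      static final LoadingCache<Long, String> CACHE = CacheBuilder.newBuilder()
          .maximumSize(10000)                       // assumed size limit
          .expireAfterAccess(30, TimeUnit.MINUTES)  // assumed expiry
          .build(new CacheLoader<Long, String>() {
            @Override
            public String load(Long requestId) {
              return "summary-for-" + requestId;    // stands in for loadAggregateCounts()
            }
          });

      public static void main(String[] args) {
        String first = CACHE.getUnchecked(42L);  // miss: load() runs
        String second = CACHE.getUnchecked(42L); // hit: served from memory
        CACHE.invalidate(42L);                   // what a create/merge/remove triggers
        String third = CACHE.getUnchecked(42L);  // miss again: recomputed
        System.out.println(first + " " + second + " " + third);
      }
    }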
 
+
   /**
    * Updates the {@link HostRoleCommandEntity#isFailureAutoSkipped()} flag for
    * all commands for the given request.

+ 6 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java

@@ -32,6 +32,7 @@ import javax.persistence.FetchType;
 import javax.persistence.GeneratedValue;
 import javax.persistence.GenerationType;
 import javax.persistence.Id;
+import javax.persistence.Index;
 import javax.persistence.JoinColumn;
 import javax.persistence.JoinColumns;
 import javax.persistence.Lob;
@@ -48,7 +49,11 @@ import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.commons.lang.ArrayUtils;
 
 @Entity
-@Table(name = "host_role_command")
+@Table(name = "host_role_command"
+       , indexes = {
+           @Index(name = "idx_hrc_request_id", columnList = "request_id")
+         , @Index(name = "idx_hrc_status_role", columnList = "status, role")
+       })
 @TableGenerator(name = "host_role_command_id_generator",
     table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value"
     , pkColumnValue = "host_role_command_id_seq"

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java

@@ -81,6 +81,9 @@ public class ServiceComponentDesiredStateEntity {
   @Enumerated(EnumType.STRING)
   private State desiredState = State.INIT;
 
+  @Column(name = "recovery_enabled", nullable = false, insertable = true, updatable = true)
+  private Integer recoveryEnabled = 0;
+
   /**
    * Unidirectional one-to-one association to {@link StackEntity}
    */
@@ -180,6 +183,14 @@ public class ServiceComponentDesiredStateEntity {
     return serviceComponentHistory;
   }
 
+  public boolean isRecoveryEnabled() {
+    return recoveryEnabled != 0;
+  }
+
+  public void setRecoveryEnabled(boolean recoveryEnabled) {
+    this.recoveryEnabled = recoveryEnabled ? 1 : 0;
+  }
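Illustrative round trip through the Integer-backed flag; the SMALLINT column added elsewhere in this commit stores 0 or 1:

    // Hedged usage sketch of the accessors above.
    ServiceComponentDesiredStateEntity entity = new ServiceComponentDesiredStateEntity();
    entity.setRecoveryEnabled(true);              // stored as 1
    boolean enabled = entity.isRecoveryEnabled(); // true; any non-zero value reads as enabled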
+
   @Override
   public boolean equals(Object o) {
     if (this == o) {

+ 2 - 11
ambari-server/src/main/java/org/apache/ambari/server/serveraction/ServerActionExecutor.java

@@ -392,17 +392,8 @@ public class ServerActionExecutor {
    * @throws InterruptedException
    */
   public void doWork() throws InterruptedException {
-    List<HostRoleCommand> tasks = db.getTasksByHostRoleAndStatus(serverHostName,
-        Role.AMBARI_SERVER_ACTION.toString(), HostRoleStatus.QUEUED);
-
-    if (null == tasks || tasks.isEmpty()) {
-      // !!! if the server is not a part of the cluster,
-      // !!! just look for anything designated AMBARI_SERVER_ACTION.
-      // !!! do we even need to worry about servername in the first place?  We're
-      // !!! _on_ the ambari server!
-      tasks = db.getTasksByRoleAndStatus(Role.AMBARI_SERVER_ACTION.name(),
-          HostRoleStatus.QUEUED);
-    }
+    List<HostRoleCommand> tasks = db.getTasksByRoleAndStatus(Role.AMBARI_SERVER_ACTION.name(),
+      HostRoleStatus.QUEUED);
 
     if ((tasks != null) && !tasks.isEmpty()) {
       for (HostRoleCommand task : tasks) {

+ 14 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java

@@ -28,6 +28,20 @@ public interface ServiceComponent {
 
   String getName();
 
+  /**
+   * Gets whether auto start (recovery) is enabled for this component.
+   * @return true if auto start is enabled, false otherwise
+   */
+  boolean isRecoveryEnabled();
+
+  /**
+   * Sets whether this component should be enabled for auto start.
+   * @param recoveryEnabled true to enable auto start, false to disable it
+   */
+  void setRecoveryEnabled(boolean recoveryEnabled);
+
   String getServiceName();
 
   long getClusterId();
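A hedged sketch of a consumer of the two new methods; the helper itself is hypothetical, only isRecoveryEnabled/setRecoveryEnabled come from the interface above:

    // Hypothetical helper: turn on auto start for a component if it is off.
    static void enableAutoStart(ServiceComponent component) {
      if (!component.isRecoveryEnabled()) {
        component.setRecoveryEnabled(true);
      }
    }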

+ 71 - 10
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java

@@ -95,7 +95,7 @@ public class ServiceComponentImpl implements ServiceComponent {
     desiredStateEntity.setDesiredState(State.INIT);
     desiredStateEntity.setServiceName(service.getName());
     desiredStateEntity.setClusterId(service.getClusterId());
-
+    desiredStateEntity.setRecoveryEnabled(false);
     setDesiredStackVersion(service.getDesiredStackVersion());
 
     hostComponents = new HashMap<String, ServiceComponentHost>();
@@ -181,6 +181,55 @@ public class ServiceComponentImpl implements ServiceComponent {
     return componentName;
   }
 
+  /**
+   * Gets the recoveryEnabled value from the desired state entity.
+   *
+   * @return true if auto start is enabled, false otherwise
+   */
+  @Override
+  public boolean isRecoveryEnabled() {
+    ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+    if (desiredStateEntity != null) {
+      return desiredStateEntity.isRecoveryEnabled();
+    } else {
+      LOG.warn("Trying to fetch a member from an entity object that may " +
+              "have been previously deleted, serviceName = " + service.getName() + ", " +
+              "componentName = " + componentName);
+    }
+    return false;
+  }
+
+  /**
+   * Sets the recoveryEnabled field in the desired state entity.
+   *
+   * @param recoveryEnabled true to enable auto start, false to disable it
+   */
+  @Override
+  public void setRecoveryEnabled(boolean recoveryEnabled) {
+    readWriteLock.writeLock().lock();
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting RecoveryEnabled of Component" + ", clusterName="
+                + service.getCluster().getClusterName() + ", clusterId="
+                + service.getCluster().getClusterId() + ", serviceName="
+                + service.getName() + ", componentName=" + getName()
+                + ", oldRecoveryEnabled=" + isRecoveryEnabled() + ", newRecoveryEnabled="
+                + recoveryEnabled);
+      }
+      ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
+      if (desiredStateEntity != null) {
+        desiredStateEntity.setRecoveryEnabled(recoveryEnabled);
+        saveIfPersisted(desiredStateEntity);
+      } else {
+        LOG.warn("Setting a member on an entity object that may have been " +
+                "previously deleted, serviceName = " + service.getName());
+      }
+
+    } finally {
+      readWriteLock.writeLock().unlock();
+    }
+  }
+
   @Override
   public String getServiceName() {
     return service.getName();
@@ -247,7 +296,8 @@ public class ServiceComponentImpl implements ServiceComponent {
               + ", clusterId=" + service.getCluster().getClusterId()
               + ", serviceName=" + service.getName()
               + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName());
+              + ", hostname=" + hostComponent.getHostName()
+              + ", recoveryEnabled=" + isRecoveryEnabled());
         }
         if (hostComponents.containsKey(hostComponent.getHostName())) {
           throw new AmbariException("Cannot add duplicate ServiceComponentHost"
@@ -255,7 +305,8 @@ public class ServiceComponentImpl implements ServiceComponent {
               + ", clusterId=" + service.getCluster().getClusterId()
               + ", serviceName=" + service.getName()
               + ", serviceComponentName=" + getName()
-              + ", hostname=" + hostComponent.getHostName());
+              + ", hostname=" + hostComponent.getHostName()
+              + ", recoveryEnabled=" + isRecoveryEnabled());
         }
         // FIXME need a better approach of caching components by host
         ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
@@ -283,6 +334,7 @@ public class ServiceComponentImpl implements ServiceComponent {
               + ", clusterId=" + service.getCluster().getClusterId()
               + ", serviceName=" + service.getName()
               + ", serviceComponentName=" + getName()
+              + ", recoveryEnabled=" + isRecoveryEnabled()
               + ", hostname=" + hostName);
         }
         if (hostComponents.containsKey(hostName)) {
@@ -291,6 +343,7 @@ public class ServiceComponentImpl implements ServiceComponent {
               + ", clusterId=" + service.getCluster().getClusterId()
               + ", serviceName=" + service.getName()
               + ", serviceComponentName=" + getName()
+              + ", recoveryEnabled=" + isRecoveryEnabled()
               + ", hostname=" + hostName);
         }
         ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
@@ -354,11 +407,11 @@ public class ServiceComponentImpl implements ServiceComponent {
     try {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Setting DesiredState of Service" + ", clusterName="
-            + service.getCluster().getClusterName() + ", clusterId="
-            + service.getCluster().getClusterId() + ", serviceName="
-            + service.getName() + ", serviceComponentName=" + getName()
-            + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
-            + state);
+                + service.getCluster().getClusterName() + ", clusterId="
+                + service.getCluster().getClusterId() + ", serviceName="
+                + service.getName() + ", serviceComponentName=" + getName()
+                + ", oldDesiredState=" + getDesiredState() + ", newDesiredState="
+                + state);
       }
       ServiceComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
       if (desiredStateEntity != null) {
@@ -428,7 +481,8 @@ public class ServiceComponentImpl implements ServiceComponent {
       ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
           cluster.getClusterName(), service.getName(), getName(),
           getDesiredStackVersion().getStackId(), getDesiredState().toString(),
-          getTotalCount(), getStartedCount(), getInstalledCount());
+          getTotalCount(), getStartedCount(), getInstalledCount(),
+          isRecoveryEnabled());
       return r;
     } finally {
       readWriteLock.readLock().unlock();
@@ -440,11 +494,13 @@ public class ServiceComponentImpl implements ServiceComponent {
     return service.getCluster().getClusterName();
   }
 
+
   @Override
   public void debugDump(StringBuilder sb) {
     readWriteLock.readLock().lock();
     try {
       sb.append("ServiceComponent={ serviceComponentName=" + getName()
+          + ", recoveryEnabled=" + isRecoveryEnabled()
           + ", clusterName=" + service.getCluster().getClusterName()
           + ", clusterId=" + service.getCluster().getClusterId()
           + ", serviceName=" + service.getName() + ", desiredStackVersion="
@@ -592,6 +648,7 @@ public class ServiceComponentImpl implements ServiceComponent {
                 + ", clusterName=" + getClusterName()
                 + ", serviceName=" + getServiceName()
                 + ", componentName=" + getName()
+                + ", recoveryEnabled=" + isRecoveryEnabled()
                 + ", hostname=" + sch.getHostName());
             return false;
           }
@@ -615,7 +672,8 @@ public class ServiceComponentImpl implements ServiceComponent {
         LOG.info("Deleting all servicecomponenthosts for component"
             + ", clusterName=" + getClusterName()
             + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName());
+            + ", componentName=" + getName()
+            + ", recoveryEnabled=" + isRecoveryEnabled());
         for (ServiceComponentHost sch : hostComponents.values()) {
           if (!sch.canBeRemoved()) {
             throw new AmbariException("Found non removable hostcomponent "
@@ -624,6 +682,7 @@ public class ServiceComponentImpl implements ServiceComponent {
                 + ", clusterName=" + getClusterName()
                 + ", serviceName=" + getServiceName()
                 + ", componentName=" + getName()
+                + ", recoveryEnabled=" + isRecoveryEnabled()
                 + ", hostname=" + sch.getHostName());
           }
         }
@@ -652,12 +711,14 @@ public class ServiceComponentImpl implements ServiceComponent {
             + ", clusterName=" + getClusterName()
             + ", serviceName=" + getServiceName()
             + ", componentName=" + getName()
+            + ", recoveryEnabled=" + isRecoveryEnabled()
             + ", hostname=" + sch.getHostName());
         if (!sch.canBeRemoved()) {
           throw new AmbariException("Could not delete hostcomponent from cluster"
               + ", clusterName=" + getClusterName()
               + ", serviceName=" + getServiceName()
               + ", componentName=" + getName()
+              + ", recoveryEnabled=" + isRecoveryEnabled()
               + ", hostname=" + sch.getHostName());
         }
         sch.delete();

+ 25 - 11
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -2474,13 +2474,31 @@ public class ClusterImpl implements Cluster {
     clusterGlobalLock.readLock().lock();
     try {
       List<ServiceConfigVersionResponse> serviceConfigVersionResponses = new ArrayList<ServiceConfigVersionResponse>();
-      Set<Long> activeIds = getActiveServiceConfigVersionIds();
 
-      for (ServiceConfigEntity serviceConfigEntity : serviceConfigDAO.getServiceConfigs(getClusterId())) {
+      List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigs(getClusterId());
+      Map<String, ServiceConfigVersionResponse> activeServiceConfigResponses = new HashMap<>();
+
+      for (ServiceConfigEntity serviceConfigEntity : serviceConfigs) {
         ServiceConfigVersionResponse serviceConfigVersionResponse = convertToServiceConfigVersionResponse(serviceConfigEntity);
 
+        ServiceConfigVersionResponse activeServiceConfigResponse = activeServiceConfigResponses.get(serviceConfigVersionResponse.getServiceName());
+        if (activeServiceConfigResponse == null) {
+          activeServiceConfigResponse = serviceConfigVersionResponse;
+          activeServiceConfigResponses.put(serviceConfigVersionResponse.getServiceName(), serviceConfigVersionResponse);
+        }
+
         serviceConfigVersionResponse.setConfigurations(new ArrayList<ConfigurationResponse>());
-        serviceConfigVersionResponse.setIsCurrent(activeIds.contains(serviceConfigEntity.getServiceConfigId()));
+
+        if (serviceConfigEntity.getGroupId() == null) {
+          if (serviceConfigVersionResponse.getCreateTime() > activeServiceConfigResponse.getCreateTime())
+            activeServiceConfigResponses.put(serviceConfigVersionResponse.getServiceName(), serviceConfigVersionResponse);
+        }
+        else if (clusterConfigGroups != null && clusterConfigGroups.containsKey(serviceConfigEntity.getGroupId())){
+          if (serviceConfigVersionResponse.getVersion() > activeServiceConfigResponse.getVersion())
+            activeServiceConfigResponses.put(serviceConfigVersionResponse.getServiceName(), serviceConfigVersionResponse);
+        }
+
+        serviceConfigVersionResponse.setIsCurrent(false);
 
         List<ClusterConfigEntity> clusterConfigEntities = serviceConfigEntity.getClusterConfigEntities();
         for (ClusterConfigEntity clusterConfigEntity : clusterConfigEntities) {
@@ -2496,6 +2514,10 @@ public class ClusterImpl implements Cluster {
         serviceConfigVersionResponses.add(serviceConfigVersionResponse);
       }
 
+      for (ServiceConfigVersionResponse serviceConfigVersionResponse: activeServiceConfigResponses.values()) {
+        serviceConfigVersionResponse.setIsCurrent(true);
+      }
+
       return serviceConfigVersionResponses;
     } finally {
       clusterGlobalLock.readLock().unlock();
@@ -2514,14 +2536,6 @@ public class ClusterImpl implements Cluster {
     return responses;
   }
 
-  private Set<Long> getActiveServiceConfigVersionIds() {
-    Set<Long> idSet = new HashSet<Long>();
-    for (ServiceConfigEntity entity : getActiveServiceConfigVersionEntities()) {
-      idSet.add(entity.getServiceConfigId());
-    }
-    return idSet;
-  }
-
   private List<ServiceConfigEntity> getActiveServiceConfigVersionEntities() {
 
     List<ServiceConfigEntity> activeServiceConfigVersions = new ArrayList<ServiceConfigEntity>();

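A hedged restatement of the selection rule in the loop above: for the default group the newest create time wins, for a live config group the highest version wins, and the surviving response per service is then flagged current. The accessors match those used in the diff.

    // Minimal sketch of the per-service "current" selection.
    static ServiceConfigVersionResponse pickCurrent(ServiceConfigVersionResponse candidate,
                                                    ServiceConfigVersionResponse current,
                                                    boolean isDefaultGroup) {
      if (current == null) {
        return candidate;
      }
      if (isDefaultGroup) {
        return candidate.getCreateTime() > current.getCreateTime() ? candidate : current;
      }
      return candidate.getVersion() > current.getVersion() ? candidate : current;
    }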
+ 13 - 4
ambari-server/src/main/java/org/apache/ambari/server/state/services/AlertNoticeDispatchService.java

@@ -451,7 +451,7 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
     String targetType = target.getNotificationType();
 
     // build the velocity objects for template rendering
-    AmbariInfo ambari = new AmbariInfo(m_metaInfo.get());
+    AmbariInfo ambari = new AmbariInfo(m_metaInfo.get(), m_configuration);
     AlertSummaryInfo summary = new AlertSummaryInfo(histories);
     DispatchInfo dispatch = new DispatchInfo(target);
 
@@ -516,7 +516,7 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
     String targetType = target.getNotificationType();
 
     // build the velocity objects for template rendering
-    AmbariInfo ambari = new AmbariInfo(m_metaInfo.get());
+    AmbariInfo ambari = new AmbariInfo(m_metaInfo.get(), m_configuration);
     AlertInfo alert = new AlertInfo(history);
     DispatchInfo dispatch = new DispatchInfo(target);
 
@@ -558,6 +558,10 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
         bodyWriter.write(alert.getAlertName());
         bodyWriter.write(" ");
         bodyWriter.write(alert.getAlertText());
+        if (alert.hasHostName()) {
+          bodyWriter.write(" ");
+          bodyWriter.append(alert.getHostName());
+        }
         bodyWriter.write("\n");
       }
     }
@@ -1042,7 +1046,8 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
      *
     * @param metaInfo
+     * @param configuration the server configuration supplying the display URL
     */
-    protected AmbariInfo(AmbariMetaInfo metaInfo) {
+    protected AmbariInfo(AmbariMetaInfo metaInfo, Configuration configuration) {
+      m_url = configuration.getAmbariDisplayUrl();
       m_version = metaInfo.getServerVersion();
     }
 
@@ -1053,6 +1058,10 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
       return m_hostName;
     }
 
+    public boolean hasUrl() {
+      return m_url != null;
+    }
+
     /**
      * @return the url
      */
@@ -1193,4 +1202,4 @@ public class AlertNoticeDispatchService extends AbstractScheduledService {
       return m_body;
     }
   }
-}
+}

+ 55 - 0
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java

@@ -28,11 +28,15 @@ import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.utils.VersionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.sql.SQLException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
@@ -49,6 +53,16 @@ public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
    */
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog222.class);
   private static final String AMS_SITE = "ams-site";
+  private static final String HIVE_SITE_CONFIG = "hive-site";
+  private static final String ATLAS_APPLICATION_PROPERTIES_CONFIG = "application-properties";
+  private static final String ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY = "atlas.hook.hive.minThreads";
+  private static final String ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY = "atlas.hook.hive.maxThreads";
+  private static final String ATLAS_CLUSTER_NAME_PROPERTY = "atlas.cluster.name";
+  private static final String ATLAS_ENABLETLS_PROPERTY = "atlas.enableTLS";
+  private static final String ATLAS_SERVER_HTTP_PORT_PROPERTY = "atlas.server.http.port";
+  private static final String ATLAS_SERVER_HTTPS_PORT_PROPERTY = "atlas.server.https.port";
+  private static final String ATLAS_REST_ADDRESS_PROPERTY = "atlas.rest.address";
+
   private static final String HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER =
     "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier";
   private static final String CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER =
@@ -115,6 +129,8 @@ public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
     updateAlerts();
     updateStormConfigs();
     updateAMSConfigs();
+    updateHiveConfig();
+    updateHostRoleCommands();
   }
 
   protected void updateStormConfigs() throws  AmbariException {
@@ -153,6 +169,10 @@ public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
 
   }
 
+  protected void updateHostRoleCommands() throws SQLException {
+    dbAccessor.createIndex("idx_hrc_status_role", "host_role_command", "status", "role");
+  }
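For reference, this call should be equivalent to CREATE INDEX idx_hrc_status_role ON host_role_command (status, role), matching the @Index annotation on HostRoleCommandEntity and the CREATE scripts below; the exact SQL emitted by dbAccessor.createIndex is an assumption.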
+
   protected void updateAMSConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
@@ -280,6 +300,41 @@ public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
     }
   }
 
+  protected void updateHiveConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
+      Config atlasConfig = cluster.getDesiredConfigByType(ATLAS_APPLICATION_PROPERTIES_CONFIG);
+
+      StackId stackId = cluster.getCurrentStackVersion();
+      boolean isStackAtLeast23 = (stackId != null && stackId.getStackName().equals("HDP") &&
+        VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
+
+      List<ServiceComponentHost> atlasHost = cluster.getServiceComponentHosts("ATLAS", "ATLAS_SERVER");
+      Map<String, String> updates = new HashMap<String, String>();
+
+      if (isStackAtLeast23 && !atlasHost.isEmpty() && hiveSiteConfig != null) {
+
+        updates.put(ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY, "1");
+        updates.put(ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY, "1");
+        updates.put(ATLAS_CLUSTER_NAME_PROPERTY, "primary");
+
+        if (atlasConfig != null && atlasConfig.getProperties().containsKey(ATLAS_ENABLETLS_PROPERTY)) {
+          String atlasEnableTLSProperty = atlasConfig.getProperties().get(ATLAS_ENABLETLS_PROPERTY);
+          String atlasScheme = "http";
+          String atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTP_PORT_PROPERTY);
+          if (atlasEnableTLSProperty.equalsIgnoreCase("true")) {
+            atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTPS_PORT_PROPERTY);
+            atlasScheme = "https";
+          }
+          updates.put(ATLAS_REST_ADDRESS_PROPERTY, String.format("%s://%s:%s", atlasScheme, atlasHost.get(0).getHostName(), atlasServerHttpPortProperty));
+        }
+        updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, false, false);
+      }
+    }
+  }
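A hedged worked example of the address assembly above, with hypothetical values:

    // Assumed inputs: atlas.enableTLS = "true", atlas.server.https.port = "21443",
    // and "atlas1.example.com" as the first ATLAS_SERVER host.
    String restAddress = String.format("%s://%s:%s", "https", "atlas1.example.com", "21443");
    // restAddress -> "https://atlas1.example.com:21443", written to atlas.rest.address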
+
+
   private String convertToDaysIfInSeconds(String secondsString) {
 
     int seconds = Integer.valueOf(secondsString);

+ 14 - 0
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java

@@ -89,6 +89,8 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   private static final String ID = "id";
   private static final String SETTING_TABLE = "setting";
 
+  protected static final String SERVICE_COMPONENT_DESIRED_STATE_TABLE = "servicecomponentdesiredstate";
+  protected static final String RECOVERY_ENABLED_COL = "recovery_enabled";
 
   // ----- Constructors ------------------------------------------------------
 
@@ -127,6 +129,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
     updateAdminPermissionTable();
+    updateServiceComponentDesiredStateTable();
     createSettingTable();
     updateRepoVersionTableDDL();
     updateServiceComponentDesiredStateTableDDL();
@@ -562,4 +565,15 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
     addSequence("servicecomponent_history_id_seq", 0L, false);
   }
+
+  /**
+   * Alter servicecomponentdesiredstate table to add recovery_enabled column.
+   * @throws SQLException
+   */
+  private void updateServiceComponentDesiredStateTable() throws SQLException {
+    // ALTER TABLE servicecomponentdesiredstate ADD COLUMN
+    // recovery_enabled SMALLINT DEFAULT 0 NOT NULL
+    dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
+            new DBColumnInfo(RECOVERY_ENABLED_COL, Short.class, null, 0, false));
+  }
 }

+ 1 - 0
ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql

@@ -177,6 +177,7 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );

+ 2 - 0
ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql

@@ -178,6 +178,7 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(100) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -683,6 +684,7 @@ CREATE TABLE setting (
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON stage (request_id);
 CREATE INDEX idx_hrc_request_id ON host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id);
 
 -- altering tables by creating unique constraints----------

+ 2 - 0
ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql

@@ -168,6 +168,7 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id NUMBER(19) NOT NULL,
   desired_state VARCHAR2(255) NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
+  recovery_enabled SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -672,6 +673,7 @@ CREATE TABLE setting (
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON stage (request_id);
 CREATE INDEX idx_hrc_request_id ON host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id);
 
 --------altering tables by creating unique constraints----------

+ 4 - 0
ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql

@@ -177,6 +177,7 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -676,8 +677,11 @@ CREATE TABLE setting (
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON stage (request_id);
 CREATE INDEX idx_hrc_request_id ON host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id);
 
+
+
 --------altering tables by creating unique constraints----------
 ALTER TABLE users ADD CONSTRAINT UNQ_users_0 UNIQUE (user_name, user_type);
 ALTER TABLE clusterconfig ADD CONSTRAINT UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag);

+ 2 - 0
ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql

@@ -202,6 +202,7 @@ CREATE TABLE ambari.servicecomponentdesiredstate (
   desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -758,6 +759,7 @@ GRANT ALL PRIVILEGES ON TABLE ambari.setting TO :username;
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON ambari.stage (request_id);
 CREATE INDEX idx_hrc_request_id ON ambari.host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON ambari.host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON ambari.role_success_criteria (request_id);
 
 --------altering tables by creating unique constraints----------

+ 2 - 0
ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql

@@ -167,6 +167,7 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id NUMERIC(19) NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
+  recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -673,6 +674,7 @@ CREATE TABLE setting (
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON stage (request_id);
 CREATE INDEX idx_hrc_request_id ON host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id);
 
 -- altering tables by creating unique constraints----------

+ 1 - 1
ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql

@@ -187,7 +187,6 @@ CREATE TABLE servicecomponentdesiredstate (
   desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
-  PRIMARY KEY CLUSTERED (id),
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id)
 );
@@ -782,6 +781,7 @@ CREATE TABLE setting (
 -- tasks indices --
 CREATE INDEX idx_stage_request_id ON stage (request_id);
 CREATE INDEX idx_hrc_request_id ON host_role_command (request_id);
+CREATE INDEX idx_hrc_status_role ON host_role_command (status, role);
 CREATE INDEX idx_rsc_request_id ON role_success_criteria (request_id);
 
 

+ 19 - 1
ambari-server/src/main/resources/alert-templates.xml

@@ -158,6 +158,20 @@
                   <div class="label-small">
                     $alert.getAlertText()
                   </div>
+                  <div class="label-small">
+                    Cluster: $alert.getAlertDefinition().getCluster().getClusterName()
+                  </div>
+                  #if( $alert.getHostName() )
+                    #if( $ambari.hasUrl() )
+                      <div class="label-small">
                      Host: <a href="$ambari.getUrl()/#/main/hosts/$alert.getHostName()/summary">$ambari.getUrl()/#/main/hosts/$alert.getHostName()/summary</a>
+                      </div>
+                    #else
+                      <div class="label-small">
+                        Host: $alert.getHostName()
+                      </div>
+                    #end
+                  #end
                 </td>
               </tr>
             #end
@@ -170,6 +184,10 @@
     This notification was sent to $dispatch.getTargetName()
     <br/>
     Apache Ambari $ambari.getServerVersion()
+    #if( $ambari.hasUrl() )
+    <br/>
    Ambari Server link: <a href="$ambari.getUrl()">$ambari.getUrl()</a>
+    #end
   </div>
 </html>
       ]]>
@@ -193,4 +211,4 @@
 $alert.getAlertText()]]>
     </body>
   </alert-template>  
-</alert-templates>
+</alert-templates>

+ 0 - 1
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-env.xml

@@ -68,7 +68,6 @@
     <description>Log Directories for Accumulo.</description>
     <value-attributes>
       <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
   </property>

+ 0 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml

@@ -45,7 +45,6 @@
     <description>Falcon log directory.</description>
     <value-attributes>
       <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
   </property>

+ 0 - 1
ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml

@@ -40,7 +40,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml

@@ -29,7 +29,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml

@@ -29,7 +29,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml

@@ -100,7 +100,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 1 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml

@@ -434,6 +434,7 @@ limitations under the License.
 
   <property>
     <name>atlas.cluster.name</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
     <value>primary</value>
     <depends-on>
       <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml

@@ -324,7 +324,6 @@
     <value>{{metric_collector_port}}</value>
     <description>Timeline port</description>
   </property>
-  <property>
   <property>
     <name>kafka.timeline.metrics.protocol</name>
     <value>{{metric_collector_protocol}}</value>

+ 0 - 1
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml

@@ -63,7 +63,6 @@
     <description>Directory for oozie logs</description>
     <value-attributes>
       <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
   </property>

+ 0 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml

@@ -29,7 +29,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml

@@ -29,7 +29,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 0 - 1
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml

@@ -49,7 +49,6 @@
     <description>ZooKeeper Pid Dir</description>
     <value-attributes>
       <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
   </property>

+ 1 - 0
ambari-server/src/main/resources/properties.json

@@ -62,6 +62,7 @@
         "ServiceComponentInfo/total_count",
         "ServiceComponentInfo/started_count",
         "ServiceComponentInfo/installed_count",
+        "ServiceComponentInfo/recovery_enabled",
         "params/run_smoke_test",
         "_"
     ],

+ 25 - 21
ambari-server/src/main/resources/scripts/Ambaripreupload.py

@@ -132,6 +132,7 @@ with Environment() as env:
   TAR_DESTINATION_FOLDER_SUFFIX = "_tar_destination_folder"
   
   class params:
+    hdfs_path_prefix = hdfs_path_prefix
     hdfs_user = "hdfs"
     mapred_user ="mapred"
     hadoop_bin_dir="/usr/hdp/" + hdp_version + "/hadoop/bin"
@@ -236,23 +237,23 @@ with Environment() as env:
     return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
   
   def createHdfsResources():
-    params.HdfsResource('/atshistory', user='hdfs', change_permissions_for_parents=True, owner='yarn', group='hadoop', type='directory', action= ['create_on_execute'], mode=0755)
-    params.HdfsResource('/user/hcat', owner='hcat', type='directory', action=['create_on_execute'], mode=0755)
-    params.HdfsResource('/hive/warehouse', owner='hive', type='directory', action=['create_on_execute'], mode=0777)
-    params.HdfsResource('/user/hive', owner='hive', type='directory', action=['create_on_execute'], mode=0755)
-    params.HdfsResource('/tmp', mode=0777, action=['create_on_execute'], type='directory', owner='hdfs')
-    params.HdfsResource('/user/ambari-qa', type='directory', action=['create_on_execute'], mode=0770)
-    params.HdfsResource('/user/oozie', owner='oozie', type='directory', action=['create_on_execute'], mode=0775)
-    params.HdfsResource('/app-logs', recursive_chmod=True, owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
-    params.HdfsResource('/tmp/entity-file-history/active', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'])
-    params.HdfsResource('/mapred', owner='mapred', type='directory', action=['create_on_execute'])
-    params.HdfsResource('/mapred/system', owner='hdfs', type='directory', action=['create_on_execute'])
-    params.HdfsResource('/mr-history/done', change_permissions_for_parents=True, owner='mapred', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
-    params.HdfsResource('/atshistory/done', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0700)
-    params.HdfsResource('/atshistory/active', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=01777)
-    params.HdfsResource('/ams/hbase', owner='ams', type='directory', action=['create_on_execute'], mode=0775)
-    params.HdfsResource('/amshbase/staging', owner='ams', type='directory', action=['create_on_execute'], mode=0711)
-    params.HdfsResource('/user/ams/hbase', owner='ams', type='directory', action=['create_on_execute'], mode=0775)
+    params.HdfsResource(format('{hdfs_path_prefix}/atshistory'), user='hdfs', change_permissions_for_parents=True, owner='yarn', group='hadoop', type='directory', action= ['create_on_execute'], mode=0755)
+    params.HdfsResource(format('{hdfs_path_prefix}/user/hcat'), owner='hcat', type='directory', action=['create_on_execute'], mode=0755)
+    params.HdfsResource(format('{hdfs_path_prefix}/hive/warehouse'), owner='hive', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource(format('{hdfs_path_prefix}/user/hive'), owner='hive', type='directory', action=['create_on_execute'], mode=0755)
+    params.HdfsResource(format('{hdfs_path_prefix}/tmp'), mode=0777, action=['create_on_execute'], type='directory', owner='hdfs')
+    params.HdfsResource(format('{hdfs_path_prefix}/user/ambari-qa'), type='directory', action=['create_on_execute'], mode=0770)
+    params.HdfsResource(format('{hdfs_path_prefix}/user/oozie'), owner='oozie', type='directory', action=['create_on_execute'], mode=0775)
+    params.HdfsResource(format('{hdfs_path_prefix}/app-logs'), recursive_chmod=True, owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource(format('{hdfs_path_prefix}/tmp/entity-file-history/active'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'])
+    params.HdfsResource(format('{hdfs_path_prefix}/mapred'), owner='mapred', type='directory', action=['create_on_execute'])
+    params.HdfsResource(format('{hdfs_path_prefix}/mapred/system'), owner='hdfs', type='directory', action=['create_on_execute'])
+    params.HdfsResource(format('{hdfs_path_prefix}/mr-history/done'), change_permissions_for_parents=True, owner='mapred', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource(format('{hdfs_path_prefix}/atshistory/done'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0700)
+    params.HdfsResource(format('{hdfs_path_prefix}/atshistory/active'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=01777)
+    params.HdfsResource(format('{hdfs_path_prefix}/ams/hbase'), owner='ams', type='directory', action=['create_on_execute'], mode=0775)
+    params.HdfsResource(format('{hdfs_path_prefix}/amshbase/staging'), owner='ams', type='directory', action=['create_on_execute'], mode=0711)
+    params.HdfsResource(format('{hdfs_path_prefix}/user/ams/hbase'), owner='ams', type='directory', action=['create_on_execute'], mode=0775)
 
 
   def putCreatedHdfsResourcesToIgnore(env):
@@ -262,28 +263,31 @@ with Environment() as env:
     
     file_content = ""
     for file in env.config['hdfs_files']:
-      file_content += file['target']
+      if not file['target'].startswith(hdfs_path_prefix):
+        raise Exception("Something created outside hdfs_path_prefix!")
+      file_content += file['target'][len(hdfs_path_prefix):]
       file_content += "\n"
       
     with open("/var/lib/ambari-agent/data/.hdfs_resource_ignore", "a+") as fp:
       fp.write(file_content)
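For illustration, assuming hdfs_path_prefix is /apps/preupload: a resource created at /apps/preupload/user/hive is recorded in .hdfs_resource_ignore as /user/hive, while any target outside the prefix raises the exception above.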
       
   def putSQLDriverToOozieShared():
-    params.HdfsResource('/user/oozie/share/lib/sqoop/{0}'.format(os.path.basename(SQL_DRIVER_PATH)),
+    params.HdfsResource(hdfs_path_prefix + '/user/oozie/share/lib/sqoop/{0}'.format(os.path.basename(SQL_DRIVER_PATH)),
                         owner='hdfs', type='file', action=['create_on_execute'], mode=0644, source=SQL_DRIVER_PATH)
       
   env.set_params(params)
   hadoop_conf_dir = params.hadoop_conf_dir
    
   oozie_libext_dir = format("/usr/hdp/{hdp_version}/oozie/libext")
+  sql_driver_filename = os.path.basename(SQL_DRIVER_PATH)
   oozie_home=format("/usr/hdp/{hdp_version}/oozie")
   oozie_setup_sh=format("/usr/hdp/{hdp_version}/oozie/bin/oozie-setup.sh")
   oozie_setup_sh_current="/usr/hdp/current/oozie-server/bin/oozie-setup.sh"
   oozie_tmp_dir = "/var/tmp/oozie"
   configure_cmds = []
   configure_cmds.append(('tar','-xvf', oozie_home + '/oozie-sharelib.tar.gz','-C', oozie_home))
-  configure_cmds.append(('cp', "/usr/share/HDP-oozie/ext-2.2.zip", format("/usr/hdp/{hdp_version}/oozie/libext")))
-  configure_cmds.append(('chown', 'oozie:hadoop', oozie_libext_dir + "/ext-2.2.zip"))
+  configure_cmds.append(('cp', "/usr/share/HDP-oozie/ext-2.2.zip", SQL_DRIVER_PATH, oozie_libext_dir))
+  configure_cmds.append(('chown', 'oozie:hadoop', oozie_libext_dir + "/ext-2.2.zip", oozie_libext_dir + "/" + sql_driver_filename))
    
   no_op_test = "ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1"
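Note on the ignore-file hunk above: each created target is recorded with hdfs_path_prefix stripped, so a later run against the real HDFS root skips resources this upgrade pass already created under the prefix. A minimal standalone sketch of that normalization (the prefix value and target list are illustrative stand-ins, not the agent's real data):

    hdfs_path_prefix = "/amshbase-upgrade"   # assumed example prefix
    created_targets = [
        "/amshbase-upgrade/user/oozie",
        "/amshbase-upgrade/app-logs",
    ]

    lines = []
    for target in created_targets:
        if not target.startswith(hdfs_path_prefix):
            raise Exception("Something created outside hdfs_path_prefix!")
        lines.append(target[len(hdfs_path_prefix):])   # e.g. "/user/oozie"

    # the agent appends to /var/lib/ambari-agent/data/.hdfs_resource_ignore;
    # a local file keeps the sketch runnable anywhere
    with open(".hdfs_resource_ignore", "a+") as fp:
        fp.write("\n".join(lines) + "\n")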
 

+ 4 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py

@@ -581,8 +581,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
         putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
         putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
         putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
-        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
         putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
+        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
       elif total_sinks_count >= 500:
         putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
         putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
@@ -593,6 +593,9 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
         putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
       pass
 
+    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
+    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
+
     # Distributed mode heap size
     if operatingMode == "distributed":
       hbase_heapsize = max(hbase_heapsize, 756)
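Note: the new timeline.metrics.service.handler.thread.count recommendation clamps the handler pool to one handler per 100 sinks, never below 20 and never above 50. The same expression, evaluated for a few sink counts:

    # identical clamp to the stack-advisor line above
    for total_sinks_count in (0, 500, 2500, 10000):
        handlers = min(50, max(20, int(total_sinks_count / 100)))
        print(total_sinks_count, handlers)   # 0 -> 20, 500 -> 20, 2500 -> 25, 10000 -> 50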

+ 0 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml

@@ -29,7 +29,6 @@
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
   </property>
   <property>

+ 10 - 7
ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py

@@ -367,13 +367,16 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     yarn_queues = "default"
     capacitySchedulerProperties = {}
-    if "capacity-scheduler" in services['configurations'] and "capacity-scheduler" in services['configurations']["capacity-scheduler"]["properties"]:
-      properties = str(services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]).split('\n')
-      for property in properties:
-        key,sep,value = property.partition("=")
-        capacitySchedulerProperties[key] = value
-    if "yarn.scheduler.capacity.root.queues" in capacitySchedulerProperties:
-      yarn_queues = str(capacitySchedulerProperties["yarn.scheduler.capacity.root.queues"])
+    if "capacity-scheduler" in services['configurations']:
+      if "capacity-scheduler" in services['configurations']["capacity-scheduler"]["properties"]:
+        properties = str(services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]).split('\n')
+        for property in properties:
+          key,sep,value = property.partition("=")
+          capacitySchedulerProperties[key] = value
+      if "yarn.scheduler.capacity.root.queues" in capacitySchedulerProperties:
+        yarn_queues = str(capacitySchedulerProperties["yarn.scheduler.capacity.root.queues"])
+      elif "yarn.scheduler.capacity.root.queues" in services['configurations']["capacity-scheduler"]["properties"]:
+        yarn_queues = services['configurations']["capacity-scheduler"]["properties"]["yarn.scheduler.capacity.root.queues"]
     # Interactive Queues property attributes
     putHiveServerPropertyAttribute = self.putPropertyAttribute(configurations, "hiveserver2-site")
     toProcessQueues = yarn_queues.split(",")
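Note: the restructured block first parses the flattened capacity-scheduler blob (one key=value pair per line) and only falls back to the standalone yarn.scheduler.capacity.root.queues property when the blob lacks that key. A self-contained sketch of the parsing step, with a hypothetical blob:

    blob = ("yarn.scheduler.capacity.root.queues=default,llap\n"
            "yarn.scheduler.capacity.maximum-applications=10000")

    capacity_scheduler_properties = {}
    for prop in blob.split('\n'):
        key, sep, value = prop.partition("=")
        capacity_scheduler_properties[key] = value

    yarn_queues = capacity_scheduler_properties.get(
        "yarn.scheduler.capacity.root.queues", "default")
    print(yarn_queues)   # -> "default,llap"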

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/hive-site.xml

@@ -42,6 +42,7 @@ limitations under the License.
 
   <property>
     <name>atlas.hook.hive.minThreads</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
     <value>1</value>
     <description>
       Minimum number of threads maintained by Atlas hook.
@@ -49,6 +50,7 @@ limitations under the License.
   </property>
 
   <property>
    <name>atlas.hook.hive.maxThreads</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
     <value>1</value>
     <description>

+ 16 - 16
ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py

@@ -943,31 +943,31 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
                                 "HAWQ Master or Standby Master cannot use the port 5432 when installed on the same host as the Ambari Server. Ambari Postgres DB uses the same port. Please choose a different value (e.g. 10432)")})
 
     # 2. Check if any data directories are pointing to root dir '/'
-    prop_name = 'hawq_master_directory'
-    display_name = 'HAWQ Master directory'
-    self.validateIfRootDir (properties, validationItems, prop_name, display_name)
-
-    prop_name = 'hawq_master_temp_directory'
-    display_name = 'HAWQ Master temp directory'
-    self.validateIfRootDir (properties, validationItems, prop_name, display_name)
-
-    prop_name = 'hawq_segment_directory'
-    display_name = 'HAWQ Segment directory'
-    self.validateIfRootDir (properties, validationItems, prop_name, display_name)
-
-    prop_name = 'hawq_segment_temp_directory'
-    display_name = 'HAWQ Segment temp directory'
-    self.validateIfRootDir (properties, validationItems, prop_name, display_name)
+    directories = {
+                    'hawq_master_directory': 'HAWQ Master directory',
+                    'hawq_master_temp_directory': 'HAWQ Master temp directory',
+                    'hawq_segment_directory': 'HAWQ Segment directory',
+                    'hawq_segment_temp_directory': 'HAWQ Segment temp directory'
+                  }
+    for property_name, display_name in directories.iteritems():
+      self.validateIfRootDir(properties, validationItems, property_name, display_name)
 
     # 3. Check YARN RM address properties
+    YARN = "YARN"
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "YARN" in servicesList and "yarn-site" in configurations:
+    if YARN in servicesList and "yarn-site" in configurations:
       yarn_site = getSiteProperties(configurations, "yarn-site")
       for hs_prop, ys_prop in self.getHAWQYARNPropertyMapping().items():
         if hs_prop in hawq_site and ys_prop in yarn_site and hawq_site[hs_prop] != yarn_site[ys_prop]:
           message = "Expected value: {0} (this property should have the same value as the property {1} in yarn-site)".format(yarn_site[ys_prop], ys_prop)
           validationItems.append({"config-name": hs_prop, "item": self.getWarnItem(message)})
 
+    # 4. Check HAWQ Resource Manager type
+    HAWQ_GLOBAL_RM_TYPE = "hawq_global_rm_type"
+    if YARN not in servicesList and HAWQ_GLOBAL_RM_TYPE in hawq_site and hawq_site[HAWQ_GLOBAL_RM_TYPE].upper() == YARN:
+      message = "{0} must be set to none if YARN service is not installed".format(HAWQ_GLOBAL_RM_TYPE)
+      validationItems.append({"config-name": HAWQ_GLOBAL_RM_TYPE, "item": self.getErrorItem(message)})
+
     return self.toConfigurationValidationProblems(validationItems, "hawq-site")
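Note: the new check 4 reduces to a pure predicate: hawq_global_rm_type may be "yarn" only when YARN appears in the installed services list. A hedged standalone version (the function name and return convention are illustrative, not the advisor's API):

    def validate_hawq_rm_type(hawq_site, services_list):
        # error only when HAWQ delegates resource management to a YARN
        # that is not installed
        rm_type = hawq_site.get("hawq_global_rm_type", "none")
        if rm_type.upper() == "YARN" and "YARN" not in services_list:
            return "hawq_global_rm_type must be set to none if YARN service is not installed"
        return None

    assert validate_hawq_rm_type({"hawq_global_rm_type": "yarn"}, ["HDFS"]) is not None
    assert validate_hawq_rm_type({"hawq_global_rm_type": "yarn"}, ["HDFS", "YARN"]) is None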
   
   

+ 38 - 3
ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml

@@ -26,7 +26,7 @@
             <name>HIVE_SERVER_INTERACTIVE</name>
             <displayName>HiveServer2 Interactive</displayName>
             <category>MASTER</category>
-            <cardinality>1</cardinality>
+            <cardinality>0+</cardinality>
             <versionAdvertised>true</versionAdvertised>
             <clientsToUpdateConfigs></clientsToUpdateConfigs>
             <dependencies>
@@ -35,7 +35,7 @@
                 <scope>cluster</scope>
                 <auto-deploy>
                   <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER</co-locate>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
                 </auto-deploy>
               </dependency>
               <dependency>
@@ -43,6 +43,15 @@
                 <scope>host</scope>
                 <auto-deploy>
                   <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>HDFS/HDFS_CLIENT</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
                 </auto-deploy>
               </dependency>
               <dependency>
@@ -50,6 +59,7 @@
                 <scope>host</scope>
                 <auto-deploy>
                   <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
                 </auto-deploy>
               </dependency>
               <dependency>
@@ -57,9 +67,26 @@
                 <scope>host</scope>
                 <auto-deploy>
                   <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>PIG/PIG</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                </auto-deploy>
+              </dependency>
+              <dependency>
+                <name>SLIDER/SLIDER</name>
+                <scope>host</scope>
+                <auto-deploy>
+                  <enabled>true</enabled>
+                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
                 </auto-deploy>
               </dependency>
-              </dependencies>
+            </dependencies>
                 <commandScript>
                   <script>scripts/hive_server_interactive.py</script>
                   <scriptType>PYTHON</scriptType>
@@ -70,6 +97,14 @@
                 </configuration-dependencies>
           </component>
         </components>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>TEZ</service>
+        <service>PIG</service>
+        <service>SLIDER</service>
+      </requiredServices>
     </service>
   </services>
 </metainfo>

+ 12 - 12
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java

@@ -607,8 +607,8 @@ public class TestActionScheduler {
     doAnswer(new Answer<List<HostRoleCommand>>() {
       @Override
       public List<HostRoleCommand> answer(InvocationOnMock invocation) throws Throwable {
-        String role = (String) invocation.getArguments()[1];
-        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[2];
+        String role = (String) invocation.getArguments()[0];
+        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[1];
 
         HostRoleCommand task = s.getHostRoleCommand(null, role);
 
@@ -618,7 +618,7 @@ public class TestActionScheduler {
           return Collections.emptyList();
         }
       }
-    }).when(db).getTasksByHostRoleAndStatus(anyString(), anyString(), any(HostRoleStatus.class));
+    }).when(db).getTasksByRoleAndStatus(anyString(), any(HostRoleStatus.class));
 
     ServerActionExecutor.init(injector);
     ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3,
@@ -762,8 +762,8 @@ public class TestActionScheduler {
     doAnswer(new Answer<List<HostRoleCommand>>() {
       @Override
       public List<HostRoleCommand> answer(InvocationOnMock invocation) throws Throwable {
-        String role = (String) invocation.getArguments()[1];
-        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[2];
+        String role = (String) invocation.getArguments()[0];
+        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[1];
 
         HostRoleCommand task = s.getHostRoleCommand(null, role);
 
@@ -774,7 +774,7 @@ public class TestActionScheduler {
         }
 
       }
-    }).when(db).getTasksByHostRoleAndStatus(anyString(), anyString(), any(HostRoleStatus.class));
+    }).when(db).getTasksByRoleAndStatus(anyString(), any(HostRoleStatus.class));
 
     ServerActionExecutor.init(injector);
     ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3,
@@ -843,8 +843,8 @@ public class TestActionScheduler {
     doAnswer(new Answer<List<HostRoleCommand>>() {
       @Override
       public List<HostRoleCommand> answer(InvocationOnMock invocation) throws Throwable {
-        String role = (String) invocation.getArguments()[1];
-        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[2];
+        String role = (String) invocation.getArguments()[0];
+        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[1];
 
         HostRoleCommand task = s.getHostRoleCommand(null, role);
 
@@ -854,7 +854,7 @@ public class TestActionScheduler {
           return Collections.emptyList();
         }
       }
-    }).when(db).getTasksByHostRoleAndStatus(anyString(), anyString(), any(HostRoleStatus.class));
+    }).when(db).getTasksByRoleAndStatus(anyString(), any(HostRoleStatus.class));
 
     ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3,
         new HostsMap((String) null), unitOfWork, null, conf);
@@ -1951,8 +1951,8 @@ public class TestActionScheduler {
     doAnswer(new Answer<List<HostRoleCommand>>() {
       @Override
       public List<HostRoleCommand> answer(InvocationOnMock invocation) throws Throwable {
-        String role = (String) invocation.getArguments()[1];
-        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[2];
+        String role = (String) invocation.getArguments()[0];
+        HostRoleStatus status = (HostRoleStatus) invocation.getArguments()[1];
 
         HostRoleCommand task = s.getHostRoleCommand(null, role);
 
@@ -1962,7 +1962,7 @@ public class TestActionScheduler {
           return Collections.emptyList();
         }
       }
-    }).when(db).getTasksByHostRoleAndStatus(anyString(), anyString(), any(HostRoleStatus.class));
+    }).when(db).getTasksByRoleAndStatus(anyString(), any(HostRoleStatus.class));
 
     doAnswer(new Answer<HostRoleCommand>() {
       @Override
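
Note: every doAnswer in this file shifts role and status from argument indices [1]/[2] to [0]/[1] because the mocked call dropped its leading host parameter (getTasksByHostRoleAndStatus(host, role, status) became getTasksByRoleAndStatus(role, status)). The same index shift in a minimal Python mock, as an analogue rather than the Java test itself:

    from unittest import mock

    db = mock.Mock()

    def answer(role, status):          # host is gone, so role is now first
        return [(role, status)]

    db.getTasksByRoleAndStatus.side_effect = answer
    assert db.getTasksByRoleAndStatus("DATANODE", "QUEUED") == [("DATANODE", "QUEUED")]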

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java

@@ -36,6 +36,7 @@ import org.apache.ambari.server.agent.rest.AgentResource;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.SecurityHelperImpl;
 import org.apache.ambari.server.stack.StackManagerFactory;
@@ -308,6 +309,7 @@ public class AgentResourceTest extends RandomPortJerseyTest {
       bind(HeartBeatHandler.class).toInstance(handler);
       bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
       bind(DBAccessor.class).toInstance(mock(DBAccessor.class));
+      bind(HostRoleCommandDAO.class).toInstance(mock(HostRoleCommandDAO.class));
     }
 
     private void installDependencies() {

+ 11 - 9
ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java

@@ -111,9 +111,9 @@ public class CheckDatabaseHelperTest {
 
     expect(mockDBDbAccessor.getConnection()).andReturn(mockConnection);
     expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
-    expect(mockStatement.executeQuery("select c.cluster_name,type_name from clusterconfigmapping ccm " +
+    expect(mockStatement.executeQuery("select c.cluster_name, ccm.type_name from clusterconfigmapping ccm " +
             "join clusters c on ccm.cluster_id=c.cluster_id " +
-            "group by c.cluster_name,type_name " +
+            "group by c.cluster_name, ccm.type_name " +
             "having sum(selected) > 1")).andReturn(mockResultSet);
 
     CheckDatabaseHelper checkDatabaseHelper = new CheckDatabaseHelper(mockDBDbAccessor, mockInjector, null);
@@ -201,8 +201,8 @@ public class CheckDatabaseHelperTest {
     expect(mockStatement.executeQuery("select count(*) from hostcomponentstate")).andReturn(mockResultSet);
     expect(mockStatement.executeQuery("select count(*) from hostcomponentdesiredstate")).andReturn(mockResultSet);
     expect(mockStatement.executeQuery("select count(*) FROM hostcomponentstate hcs " +
-            "JOIN hostcomponentdesiredstate hcds ON hcs.service_name = hcds.service_name AND " +
-            "hcs.component_name = hcds.component_name AND hcs.host_id = hcds.host_id")).andReturn(mockResultSet);
+            "JOIN hostcomponentdesiredstate hcds ON hcs.service_name=hcds.service_name AND " +
+            "hcs.component_name=hcds.component_name AND hcs.host_id=hcds.host_id")).andReturn(mockResultSet);
 
     CheckDatabaseHelper checkDatabaseHelper = new CheckDatabaseHelper(mockDBDbAccessor, mockInjector, null);
 
@@ -263,24 +263,26 @@ public class CheckDatabaseHelperTest {
     expect(mockStatement.executeQuery("select c.cluster_name, service_name from clusterservices cs " +
             "join clusters c on cs.cluster_id=c.cluster_id " +
             "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)")).andReturn(mockResultSet);
-    expect(mockStatement.executeQuery("select service_name from serviceconfig where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null")).andReturn(mockResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name, sc.service_name, sc.version from serviceconfig sc " +
+            "join clusters c on sc.cluster_id=c.cluster_id " +
+            "where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null")).andReturn(mockResultSet);
     expect(mockStatement.executeQuery("select c.cluster_name, s.stack_name, s.stack_version from clusters c " +
             "join stack s on c.desired_stack_id = s.stack_id")).andReturn(stackResultSet);
-    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name, type_name, sc.version from clusterservices cs " +
+    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name, cc.type_name, sc.version from clusterservices cs " +
             "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
             "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " +
             "join clusters c on cc.cluster_id=c.cluster_id " +
             "where sc.group_id is null " +
-            "group by c.cluster_name, cs.service_name, type_name, sc.version")).andReturn(serviceConfigResultSet);
-    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name,cc.type_name from clusterservices cs " +
+            "group by c.cluster_name, cs.service_name, cc.type_name, sc.version")).andReturn(serviceConfigResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name, cc.type_name from clusterservices cs " +
             "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
             "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " +
             "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " +
             "join clusters c on ccm.cluster_id=c.cluster_id " +
             "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " +
-            "group by c.cluster_name,cs.service_name,cc.type_name " +
+            "group by c.cluster_name, cs.service_name, cc.type_name " +
             "having sum(ccm.selected) < 1")).andReturn(mockResultSet);
 
     CheckDatabaseHelper checkDatabaseHelper = new CheckDatabaseHelper(mockDBDbAccessor, mockInjector, null);

+ 95 - 0
ambari-server/src/test/java/org/apache/ambari/server/configuration/ConfigurationTest.java

@@ -563,4 +563,99 @@ public class ConfigurationTest {
     Assert.assertEquals(44, configuration.getPropertyProvidersThreadPoolMaxSize());
   }
 
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheSize() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+    ambariProperties.setProperty(Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE, "3000");
+
+    // When
+    long actualCacheSize = configuration.getHostRoleCommandStatusSummaryCacheSize();
+
+    // Then
+    Assert.assertEquals(actualCacheSize, 3000L);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheSizeDefault() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+
+    // When
+    long actualCacheSize = configuration.getHostRoleCommandStatusSummaryCacheSize();
+
+    // Then
+    Assert.assertEquals(actualCacheSize, Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheExpiryDuration() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+    ambariProperties.setProperty(Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION, "60");
+
+    // When
+    long actualCacheExpiryDuration = configuration.getHostRoleCommandStatusSummaryCacheExpiryDuration();
+
+    // Then
+    Assert.assertEquals(actualCacheExpiryDuration, 60L);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheExpiryDurationDefault() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+
+    // When
+    long actualCacheExpiryDuration = configuration.getHostRoleCommandStatusSummaryCacheExpiryDuration();
+
+    // Then
+    Assert.assertEquals(actualCacheExpiryDuration, Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheEnabled() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+    ambariProperties.setProperty(Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED, "true");
+
+    // When
+    boolean actualCacheEnabledConfig = configuration.getHostRoleCommandStatusSummaryCacheEnabled();
+
+    // Then
+    Assert.assertEquals(actualCacheEnabledConfig, true);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheDisabled() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+    ambariProperties.setProperty(Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED, "false");
+
+    // When
+    boolean actualCacheEnabledConfig = configuration.getHostRoleCommandStatusSummaryCacheEnabled();
+
+    // Then
+    Assert.assertEquals(actualCacheEnabledConfig, false);
+  }
+
+  @Test
+  public void testGetHostRoleCommandStatusSummaryCacheEnabledDefault() throws  Exception {
+    // Given
+    final Properties ambariProperties = new Properties();
+    final Configuration configuration = new Configuration(ambariProperties);
+
+    // When
+    boolean actualCacheEnabledConfig = configuration.getHostRoleCommandStatusSummaryCacheEnabled();
+
+    // Then
+    Assert.assertEquals(actualCacheEnabledConfig, Configuration.SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT);
+  }
+
 }

+ 3 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -5679,6 +5679,7 @@ public class AmbariManagementControllerTest {
       clusters.getCluster(clusterName).getService(serviceName)
       .getServiceComponents().values()) {
       Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
+      Assert.assertFalse(sc.isRecoveryEnabled()); // default value of recoveryEnabled
       for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
         Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
         Assert.assertEquals(State.INIT, sch.getState());
@@ -5695,6 +5696,7 @@ public class AmbariManagementControllerTest {
     for (ServiceComponent sc :
       clusters.getCluster(clusterName).getService(serviceName)
           .getServiceComponents().values()) {
+      sc.setRecoveryEnabled(true);
       for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
         sch.setState(State.INSTALLED);
       }
@@ -5714,6 +5716,7 @@ public class AmbariManagementControllerTest {
       clusters.getCluster(clusterName).getService(serviceName)
           .getServiceComponents().values()) {
       Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
+      Assert.assertTrue(sc.isRecoveryEnabled());
       for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
         Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
         Assert.assertEquals(State.INSTALLED, sch.getState());

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java

@@ -42,6 +42,7 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.credential.PrincipalKeyCredential;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -215,6 +216,7 @@ public class KerberosHelperTest extends EasyMockSupport {
         bind(CreatePrincipalsServerAction.class).toInstance(createMock(CreatePrincipalsServerAction.class));
         bind(CreateKeytabFilesServerAction.class).toInstance(createMock(CreateKeytabFilesServerAction.class));
         bind(StackAdvisorHelper.class).toInstance(createMock(StackAdvisorHelper.class));
+        bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
       }
     });
 

+ 24 - 13
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java

@@ -225,11 +225,14 @@ public class ComponentResourceProviderTest {
     expect(service.getServiceComponents()).andReturn(serviceComponentMap).anyTimes();
 
     expect(serviceComponent1.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component100", null, "", 1, 1, 0));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component100", null, "", 1, 1, 0,
+              true /* recovery enabled */));
     expect(serviceComponent2.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", 1, 1, 0));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", 1, 1, 0,
+              false /* recovery not enabled */));
     expect(serviceComponent3.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", 1, 1, 0));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", 1, 1, 0,
+              true /* recovery enabled */));
 
     expect(ambariMetaInfo.getComponent((String) anyObject(),
         (String) anyObject(), (String) anyObject(), (String) anyObject()))
@@ -258,6 +261,7 @@ public class ComponentResourceProviderTest {
     propertyIds.add(ComponentResourceProvider.COMPONENT_TOTAL_COUNT_PROPERTY_ID);
     propertyIds.add(ComponentResourceProvider.COMPONENT_STARTED_COUNT_PROPERTY_ID);
     propertyIds.add(ComponentResourceProvider.COMPONENT_INSTALLED_COUNT_PROPERTY_ID);
+    propertyIds.add(ComponentResourceProvider.COMPONENT_RECOVERY_ENABLED_ID);
 
     Predicate predicate = new PredicateBuilder()
       .property(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID)
@@ -282,6 +286,8 @@ public class ComponentResourceProviderTest {
         ComponentResourceProvider.COMPONENT_STARTED_COUNT_PROPERTY_ID));
       Assert.assertEquals(0, resource.getPropertyValue(
         ComponentResourceProvider.COMPONENT_INSTALLED_COUNT_PROPERTY_ID));
+      Assert.assertEquals(String.valueOf(true), resource.getPropertyValue(
+        ComponentResourceProvider.COMPONENT_RECOVERY_ENABLED_ID));
     }
 
     // verify
@@ -364,11 +370,14 @@ public class ComponentResourceProviderTest {
     expect(component3Info.getCategory()).andReturn(null);
 
     expect(serviceComponent1.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", 1, 0, 1));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", 1, 0, 1,
+              false /* recovery not enabled */));
     expect(serviceComponent2.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", 1, 0, 1));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", 1, 0, 1,
+              false /* recovery not enabled */));
     expect(serviceComponent3.convertToResponse()).andReturn(
-      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component103", null, "", 1, 0, 1));
+      new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component103", null, "", 1, 0, 1,
+              false /* recovery not enabled */));
     expect(serviceComponent1.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
     expect(serviceComponent2.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
     expect(serviceComponent3.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
@@ -412,6 +421,7 @@ public class ComponentResourceProviderTest {
 
     Map<String, Object> properties = new LinkedHashMap<String, Object>();
 
+    properties.put(ComponentResourceProvider.COMPONENT_RECOVERY_ENABLED_ID, String.valueOf(true) /* recovery enabled */);
     properties.put(ComponentResourceProvider.COMPONENT_STATE_PROPERTY_ID, "STARTED");
     properties.put(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
 
@@ -607,7 +617,7 @@ public class ComponentResourceProviderTest {
 
     // requests
     ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        null);
+        null, String.valueOf(true /* recovery enabled */));
 
     Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
     setRequests.add(request1);
@@ -667,14 +677,15 @@ public class ComponentResourceProviderTest {
 
     // requests
     ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        null);
+        null, String.valueOf(true /* recovery enabled */));
     ServiceComponentRequest request2 = new ServiceComponentRequest("cluster1", "service1", "component2",
-        null);
+        null, String.valueOf(true /* recovery enabled */));
     ServiceComponentRequest request3 = new ServiceComponentRequest("cluster1", "service1", "component3",
-        null);
+        null, String.valueOf(true /* recovery enabled */));
     ServiceComponentRequest request4 = new ServiceComponentRequest("cluster1", "service1", "component4",
-        null);
-    ServiceComponentRequest request5 = new ServiceComponentRequest("cluster1", "service2", null, null);
+        null, String.valueOf(true /* recovery enabled */));
+    ServiceComponentRequest request5 = new ServiceComponentRequest("cluster1", "service2", null, null,
+              String.valueOf(true /* recovery enabled */));
 
     Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
     setRequests.add(request1);
@@ -758,7 +769,7 @@ public class ComponentResourceProviderTest {
 
     // requests
     ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        null);
+        null, String.valueOf(true /* recovery enabled */));
 
     Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
     setRequests.add(request1);

+ 18 - 17
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java

@@ -31,6 +31,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.FileReader;
 import java.lang.reflect.Type;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -62,6 +63,7 @@ import org.apache.commons.lang.StringUtils;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.springframework.util.Assert;
 
 /**
  * StackManager unit tests.
@@ -641,12 +643,16 @@ public class StackManagerTest {
         stack.getKerberosDescriptorFileLocation());
   }
 
-  @Ignore
   @Test
   public void testMetricsLoaded() throws Exception {
 
-    String stackRoot = ClassLoader.getSystemClassLoader().getResource("stacks").getPath().replace("test-classes","classes");
-    String commonServices = ClassLoader.getSystemClassLoader().getResource("common-services").getPath().replace("test-classes","classes");
+    URL rootDirectoryURL = StackManagerTest.class.getResource("/");
+    Assert.notNull(rootDirectoryURL);
+
+    File resourcesDirectory = new File(new File(rootDirectoryURL.getFile()).getParentFile().getParentFile(), "src/main/resources");
+
+    File stackRoot = new File(resourcesDirectory, "stacks");
+    File commonServices = new File(resourcesDirectory, "common-services");
 
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
@@ -660,7 +666,7 @@ public class StackManagerTest {
 
     OsFamily osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(new File(stackRoot), new File(commonServices),
+    StackManager stackManager = new StackManager(stackRoot, commonServices,
             osFamily, metaInfoDao, actionMetadata, stackDao);
 
     for (StackInfo stackInfo : stackManager.getStacks()) {
@@ -682,12 +688,15 @@ public class StackManagerTest {
     }
   }
 
-  @Ignore
   @Test
   public void testServicesWithRangerPluginRoleCommandOrder() throws AmbariException {
-    // Given
-    String stackRoot = ClassLoader.getSystemClassLoader().getResource("stacks").getPath().replace("test-classes","classes");
-    String commonServices = ClassLoader.getSystemClassLoader().getResource("common-services").getPath().replace("test-classes","classes");
+    URL rootDirectoryURL = StackManagerTest.class.getResource("/");
+    Assert.notNull(rootDirectoryURL);
+
+    File resourcesDirectory = new File(new File(rootDirectoryURL.getFile()).getParentFile().getParentFile(), "src/main/resources");
+
+    File stackRoot = new File(resourcesDirectory, "stacks");
+    File commonServices = new File(resourcesDirectory, "common-services");
 
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
@@ -701,7 +710,7 @@ public class StackManagerTest {
 
     OsFamily osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(new File(stackRoot), new File(commonServices), osFamily, metaInfoDao, actionMetadata, stackDao);
+    StackManager stackManager = new StackManager(stackRoot, commonServices, osFamily, metaInfoDao, actionMetadata, stackDao);
 
     String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
     String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
@@ -783,14 +792,6 @@ public class StackManagerTest {
 
     assertTrue(rangerUserSyncRoleCommand + " should be dependent of " + rangerAdminRoleCommand, rangerUserSyncBlockers.contains(rangerAdminRoleCommand));
     assertTrue(rangerUserSyncRoleCommand + " should be dependent of " + kmsRoleCommand, rangerUserSyncBlockers.contains(kmsRoleCommand));
-
-    // Zookeeper Server
-    ArrayList<String> zookeeperBlockers = (ArrayList<String>)generalDeps.get(zookeeperServerRoleCommand);
-
-    assertTrue(zookeeperServerRoleCommand + " should be dependent of " + rangerUserSyncRoleCommand, zookeeperBlockers.contains(rangerUserSyncRoleCommand));
-
   }
-
-
   //todo: component override assertions
 }

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java

@@ -48,6 +48,7 @@ import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.stack.StackManagerFactory;
@@ -754,6 +755,7 @@ public class ConfigHelperTest {
           bind(Clusters.class).toInstance(createNiceMock(ClustersImpl.class));
           bind(ClusterController.class).toInstance(clusterController);
           bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+          bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
         }
       });
 

+ 124 - 10
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java

@@ -19,12 +19,14 @@
 package org.apache.ambari.server.upgrade;
 
 
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -33,6 +35,7 @@ import org.apache.ambari.server.controller.ConfigurationRequest;
 import org.apache.ambari.server.controller.ConfigurationResponse;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.StackDAO;
@@ -40,6 +43,9 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
@@ -47,10 +53,16 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import javax.persistence.EntityManager;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.Map;
+import com.google.common.collect.Maps;
+import com.google.gson.Gson;
+import com.google.inject.AbstractModule;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.Provider;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.AmbariException;
 
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
@@ -58,6 +70,7 @@ import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
@@ -101,6 +114,8 @@ public class UpgradeCatalog222Test {
     Method updateAlerts = UpgradeCatalog222.class.getDeclaredMethod("updateAlerts");
     Method updateStormConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateStormConfigs");
     Method updateAMSConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateAMSConfigs");
+    Method updateHiveConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateHiveConfig");
+    Method updateHostRoleCommands = UpgradeCatalog222.class.getDeclaredMethod("updateHostRoleCommands");
 
 
     UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
@@ -108,6 +123,8 @@ public class UpgradeCatalog222Test {
             .addMockedMethod(updateAlerts)
             .addMockedMethod(updateStormConfigs)
             .addMockedMethod(updateAMSConfigs)
+            .addMockedMethod(updateHiveConfigs)
+            .addMockedMethod(updateHostRoleCommands)
             .createMock();
 
     upgradeCatalog222.addNewConfigurationsFromXml();
@@ -118,6 +135,10 @@ public class UpgradeCatalog222Test {
     expectLastCall().once();
     upgradeCatalog222.updateAMSConfigs();
     expectLastCall().once();
+    upgradeCatalog222.updateHostRoleCommands();
+    expectLastCall().once();
+    upgradeCatalog222.updateHiveConfig();
+    expectLastCall().once();
 
     replay(upgradeCatalog222);
 
@@ -126,6 +147,75 @@ public class UpgradeCatalog222Test {
     verify(upgradeCatalog222);
   }
 
+  @Test
+  public void testHiveSiteUpdateConfigs() throws AmbariException {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Config hiveSiteConfigs = easyMockSupport.createNiceMock(Config.class);
+    final Config AtlasSiteConfigs = easyMockSupport.createNiceMock(Config.class);
+
+    final ServiceComponentHost atlasHost = easyMockSupport.createNiceMock(ServiceComponentHost.class);
+    final List<ServiceComponentHost> atlasHosts = new ArrayList<>();
+    atlasHosts.add(atlasHost);
+
+    StackId stackId = new StackId("HDP","2.3");
+
+    final Map<String, String> propertiesAtlasSiteConfigs = new HashMap<String, String>() {{
+      put("atlas.enableTLS", "true");
+      put("atlas.server.https.port", "21443");
+    }};
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+        bind(ServiceComponentHost.class).toInstance(atlasHost);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(stackId).once();
+    expect(mockClusterExpected.getServiceComponentHosts("ATLAS", "ATLAS_SERVER")).andReturn(atlasHosts).once();
+    expect(atlasHost.getHostName()).andReturn("c6401").once();
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(hiveSiteConfigs).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("application-properties")).andReturn(AtlasSiteConfigs).anyTimes();
+    expect(AtlasSiteConfigs.getProperties()).andReturn(propertiesAtlasSiteConfigs).anyTimes();
+
+    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
+      .withConstructor(Injector.class)
+      .withArgs(mockInjector)
+      .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+        Map.class, boolean.class, boolean.class)
+      .createMock();
+
+    Map<String, String> expectedUpdates = new HashMap<>();
+    expectedUpdates.put("atlas.hook.hive.minThreads", "1");
+    expectedUpdates.put("atlas.hook.hive.maxThreads", "1");
+    expectedUpdates.put("atlas.cluster.name", "primary");
+    expectedUpdates.put("atlas.rest.address", "https://c6401:21443");
+
+    upgradeCatalog222.updateConfigurationPropertiesForCluster(mockClusterExpected, "hive-site", expectedUpdates,
+      false, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog222);
+    upgradeCatalog222.updateHiveConfig();
+    easyMockSupport.verifyAll();
+  }
+
+
   @Test
   public void testAmsSiteUpdateConfigs() throws Exception{
 
@@ -203,4 +293,28 @@ public class UpgradeCatalog222Test {
 
   }
 
+  @Test
+  public void testUpdateHostRoleCommands() throws Exception {
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+    dbAccessor.createIndex(eq("idx_hrc_status"), eq("host_role_command"), eq("status"), eq("role"));
+    expectLastCall().once();
+
+    replay(dbAccessor);
+
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog222 upgradeCatalog222 = injector.getInstance(UpgradeCatalog222.class);
+    upgradeCatalog222.updateHostRoleCommands();
+
+
+    verify(dbAccessor);
+  }
+
 }

+ 12 - 0
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java

@@ -103,6 +103,7 @@ public class UpgradeCatalog240Test {
   @Test
   public void testExecuteDDLUpdates() throws SQLException, AmbariException {
     Capture<DBAccessor.DBColumnInfo> capturedColumnInfo = newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedScColumnInfo = newCapture();
     final DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
     Configuration configuration = createNiceMock(Configuration.class);
     Connection connection = createNiceMock(Connection.class);
@@ -111,6 +112,8 @@ public class UpgradeCatalog240Test {
     Capture<List<DBAccessor.DBColumnInfo>> capturedSettingColumns = EasyMock.newCapture();
 
     dbAccessor.addColumn(eq("adminpermission"), capture(capturedColumnInfo));
+    dbAccessor.addColumn(eq(UpgradeCatalog240.SERVICE_COMPONENT_DESIRED_STATE_TABLE), capture(capturedScColumnInfo));
+
     dbAccessor.createTable(eq("setting"), capture(capturedSettingColumns), eq("id"));
     expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
     expect(dbAccessor.getConnection()).andReturn(connection);
@@ -176,6 +179,15 @@ public class UpgradeCatalog240Test {
     Assert.assertEquals(1, columnInfo.getDefaultValue());
     Assert.assertEquals(false, columnInfo.isNullable());
 
+    // Verify if recovery_enabled column was added to servicecomponentdesiredstate table
+    DBAccessor.DBColumnInfo columnScInfo = capturedScColumnInfo.getValue();
+    Assert.assertNotNull(columnScInfo);
+    Assert.assertEquals(UpgradeCatalog240.RECOVERY_ENABLED_COL, columnScInfo.getName());
+    Assert.assertEquals(null, columnScInfo.getLength());
+    Assert.assertEquals(Short.class, columnScInfo.getType());
+    Assert.assertEquals(0, columnScInfo.getDefaultValue());
+    Assert.assertEquals(false, columnScInfo.isNullable());
+
     Map<String, Class> expectedCaptures = new HashMap<>();
     expectedCaptures.put("id", Long.class);
     expectedCaptures.put("name", String.class);

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java

@@ -37,6 +37,7 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.stack.StackManagerFactory;
@@ -118,6 +119,7 @@ public class StageUtilsTest extends EasyMockSupport {
         bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
         bind(HostDAO.class).toInstance(createNiceMock(HostDAO.class));
         bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class));
+        bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
       }
     });
 

+ 6 - 6
ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py

@@ -1820,7 +1820,7 @@ class TestHDP206StackAdvisor(TestCase):
     )
     recommendedDefaults = {"property1": "file:///grid/0/var/dir"}
     warn = self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo)
-    self.assertIsNotNone(warn)
+    self.assertTrue(warn != None)
     self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
 
     # Set by user /var mountpoint, which is non-root , but not preferable - no warning
@@ -1831,7 +1831,7 @@ class TestHDP206StackAdvisor(TestCase):
         "mountpoint" : "/var"
       }
     )
-    self.assertIsNone(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo))
+    self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
 
   def test_validatorEnoughDiskSpace(self):
     reqiuredDiskSpace = 1048576
@@ -1847,7 +1847,7 @@ class TestHDP206StackAdvisor(TestCase):
       }
     ]}
     properties = {"property1": "file:///var/dir"}
-    self.assertIsNone(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace))
+    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace) == None)
 
     # local FS, no enough space
     hostInfo = {"disk_info": [
@@ -1858,16 +1858,16 @@ class TestHDP206StackAdvisor(TestCase):
       }
     ]}
     warn = self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace)
-    self.assertIsNotNone(warn)
+    self.assertTrue(warn != None)
     self.assertEquals({'message': errorMsg, 'level': 'WARN'}, warn)
 
     # non-local FS, HDFS
     properties = {"property1": "hdfs://h1"}
-    self.assertIsNone(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace))
+    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace) == None)
 
     # non-local FS, WASB
     properties = {"property1": "wasb://h1"}
-    self.assertIsNone(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace))
+    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, reqiuredDiskSpace) == None)
 
   def test_round_to_n(self):
     self.assertEquals(self.stack_advisor_impl.round_to_n(0), 0)
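
Note: assertIsNone/assertIsNotNone are rewritten to plain truth-value assertions, most likely because those helpers only exist from Python 2.7's unittest and this suite still has to run on 2.6. A minimal test case showing the portable pattern:

    import unittest

    class CompatExample(unittest.TestCase):
        # Python 2.6's unittest lacks assertIsNone/assertIsNotNone,
        # so the suite compares against None directly
        def test_none_checks(self):
            warn = {'level': 'WARN'}
            self.assertTrue(warn != None)    # stands in for assertIsNotNone(warn)
            self.assertTrue(None == None)    # stands in for assertIsNone(x)

    if __name__ == '__main__':
        unittest.main()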

+ 12 - 2
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py

@@ -1028,7 +1028,7 @@ class TestHDP22StackAdvisor(TestCase):
       'hive-site': {
         'properties': {
           'hive.server2.enable.doAs': 'true',
-          'hive.server2.tez.default.queues': "default",
+          'hive.server2.tez.default.queues': "queue1,queue2",
           'hive.server2.tez.initialize.default.sessions': 'false',
           'hive.server2.tez.sessions.per.default.queue': '1',
           'hive.auto.convert.join.noconditionaltask.size': '268435456',
@@ -1073,7 +1073,16 @@ class TestHDP22StackAdvisor(TestCase):
          'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
          'hive.server2.authentication.ldap.url': {'delete': 'true'},
          'hive.server2.tez.default.queues': {
-           'entries': [{'value': 'default', 'label': 'default queue'}]
+           "entries": [
+             {
+               "value": "queue1",
+               "label": "queue1 queue"
+             },
+             {
+               "value": "queue2",
+               "label": "queue2 queue"
+             }
+           ]
           }
         }
       },
@@ -2052,6 +2061,7 @@ class TestHDP22StackAdvisor(TestCase):
           "timeline.metrics.cluster.aggregate.splitpoints": " ",
           "timeline.metrics.host.aggregate.splitpoints": " ",
           "timeline.metrics.host.aggregator.ttl": "1",
+          "timeline.metrics.service.handler.thread.count": "20",
           'timeline.metrics.service.watcher.disabled': 'false'
         }
       }
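
Note: the expected value-attributes for hive.server2.tez.default.queues now carry one entry per configured queue. The derivation the test exercises amounts to mapping each queue name to a {value, label} pair; a sketch of that mapping (the comprehension form is an assumption, since the generating advisor code is not shown in this hunk):

    yarn_queues = "queue1,queue2"
    entries = [{"value": q, "label": q + " queue"} for q in yarn_queues.split(",")]
    print(entries)
    # [{'value': 'queue1', 'label': 'queue1 queue'},
    #  {'value': 'queue2', 'label': 'queue2 queue'}]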

+ 46 - 0
ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py

@@ -1630,6 +1630,7 @@ class TestHDP23StackAdvisor(TestCase):
   def test_validateHAWQConfigurations(self):
     services = self.load_json("services-hawq-3-hosts.json")
     # setup default configuration values
+    # Test hawq_rm_yarn_address and hawq_rm_scheduler_address are set correctly
     configurations = services["configurations"]
     configurations["hawq-site"] = {"properties": {"hawq_rm_yarn_address": "localhost:8032",
                                                   "hawq_rm_yarn_scheduler_address": "localhost:8030"}}
@@ -1664,3 +1665,48 @@ class TestHDP23StackAdvisor(TestCase):
     self.assertEqual(len(problems), 2)
     self.assertEqual(problems_dict, expected_warnings)
 
+    # Test hawq_global_rm_type validation
+    services = {
+                 "services" : [
+                   {
+                     "StackServices" : {
+                     "service_name" : "HAWQ"
+                     },
+                     "components": []
+                   } ],
+                 "configurations":
+                   {
+                     "hawq-site": {
+                       "properties": {
+                         "hawq_global_rm_type": "yarn"
+                       }
+                     }
+                   }
+                }
+    properties = services["configurations"]["hawq-site"]["properties"]
+
+    # case 1: hawq_global_rm_type is set as yarn, but YARN service is not installed. Validation error expected.
+    """
+    Validation error expected is as below:
+                    [ {
+                          "config-type": "hawq-site",
+                          "message": "hawq_global_rm_type must be set to none if YARN service is not installed",
+                          "type": "configuration",
+                          "config-name": "hawq_global_rm_type",
+                          "level": "ERROR"
+                    } ]
+    """
+    problems = self.stackAdvisor.validateHAWQConfigurations(properties, defaults, services["configurations"], services, hosts)
+    self.assertEqual(len(problems), 1)
+    self.assertEqual(problems[0]["config-type"], "hawq-site")
+    self.assertEqual(problems[0]["message"], "hawq_global_rm_type must be set to none if YARN service is not installed")
+    self.assertEqual(problems[0]["type"], "configuration")
+    self.assertEqual(problems[0]["config-name"], "hawq_global_rm_type")
+    self.assertEqual(problems[0]["level"], "ERROR")
+
+
+    # case 2: hawq_global_rm_type is set as yarn, and YARN service is installed. No validation errors expected.
+    services["services"].append({"StackServices" : {"service_name" : "YARN"}, "components":[]})
+
+    problems = self.stackAdvisor.validateHAWQConfigurations(properties, defaults, services["configurations"], services, hosts)
+    self.assertEqual(len(problems), 0)

+ 1 - 0
ambari-web/app/assets/test/tests.js

@@ -239,6 +239,7 @@ var files = [
   'test/views/main/dashboard/widgets/uptime_text_widget_test',
   'test/views/main/dashboard/widgets/node_managers_live_test',
   'test/views/main/dashboard/widgets/datanode_live_test',
+  'test/views/main/dashboard/widgets/hawqsegment_live_test',
   'test/views/main/dashboard/widgets/hbase_average_load_test',
   'test/views/main/dashboard/widgets/hbase_regions_in_transition_test',
   'test/views/main/dashboard/widgets/namenode_rpc_test',

+ 1 - 1
ambari-web/app/controllers/main/admin/highAvailability/hawq/addStandby/step3_controller.js

@@ -141,7 +141,7 @@ App.AddHawqStandbyWizardStep3Controller = Em.Controller.extend({
   submit: function () {
     if (!this.get('isSubmitDisabled')) {
       dataDir = this.get('hawqProps').items[0].properties['hawq_master_directory'];
-      hawqStandby = this.get('hawqProps').items[0].properties['hawq_standby_address_host']
+      hawqStandby = this.get('content.hawqHosts.newHawqStandby');
       App.showConfirmationPopup(
         function() {
           App.get('router.mainAdminKerberosController').getKDCSessionState(function() {
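A note on the one-line fix above: during the Add Standby wizard the cluster's hawq-site does not yet contain a standby host, so reading `hawq_standby_address_host` out of the fetched configs would show nothing useful in the confirmation popup (presumably the motivation; the commit message is not shown). The wizard's own state already carries the chosen host:

// Before: cluster config, which still reflects the pre-standby topology.
hawqStandby = this.get('hawqProps').items[0].properties['hawq_standby_address_host'];
// After: the host the user picked earlier in this wizard (wizard-local state).
hawqStandby = this.get('content.hawqHosts.newHawqStandby');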

+ 5 - 0
ambari-web/app/mappers/components_state_mapper.js

@@ -59,6 +59,11 @@ App.componentsStateMapper = App.QuickDataMapper.create({
       node_managers_installed: 'INSTALLED_PATH',
       node_managers_total: 'TOTAL_PATH'
     },
+    'HAWQSEGMENT': {
+      hawq_segments_started: 'STARTED_PATH',
+      hawq_segments_installed: 'INSTALLED_PATH',
+      hawq_segments_total: 'TOTAL_PATH'
+    },
     'HBASE_REGIONSERVER': {
       region_servers_started: 'STARTED_PATH',
       region_servers_installed: 'INSTALLED_PATH',
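The `HAWQSEGMENT` entry follows the existing per-component pattern: `STARTED_PATH`, `INSTALLED_PATH` and `TOTAL_PATH` are placeholders the mapper substitutes with the concrete JSON paths into each component's `ServiceComponentInfo` block, and the snake_case keys become the camelCase attributes the dashboard widget reads. A sketch of the result under that assumption (counts are made-up sample data):

// After the mapper runs, the HAWQ service model would expose:
var hawq = App.Service.find('HAWQ');
hawq.get('hawqSegmentsStarted');   // e.g. 3
hawq.get('hawqSegmentsInstalled'); // e.g. 1 -- "installed" here means stopped
hawq.get('hawqSegmentsTotal');     // e.g. 4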

+ 5 - 0
ambari-web/app/messages.js

@@ -2532,6 +2532,7 @@ Em.I18n.translations = {
   'dashboard.widgets.YARNLinks': 'YARN Links',
   'dashboard.widgets.error.invalid': 'Invalid! Enter a number between 0 - {0}',
   'dashboard.widgets.error.smaller': 'Threshold 1 should be smaller than threshold 2!',
+  'dashboard.widgets.HawqSegmentUp': 'HAWQ Segments Live',
 
   'dashboard': {
     'widgets': {
@@ -2636,6 +2637,10 @@ Em.I18n.translations = {
   'dashboard.services.hbase.masterStarted':'Master Started',
   'dashboard.services.hbase.masterActivated':'Master Activated',
 
+  'dashboard.services.hawq.segments.started':'started',
+  'dashboard.services.hawq.segments.stopped':'stopped',
+  'dashboard.services.hawq.segments.total':'in total',
+
   'dashboard.services.hive.clients':'Hive Clients',
   'dashboard.services.hive.client':'Hive Client',
   'dashboard.services.hive.metastore':'Hive Metastore',

+ 2 - 2
ambari-web/app/models/alerts/alert_definition.js

@@ -41,8 +41,8 @@ App.AlertDefinition = DS.Model.extend({
   groups: DS.hasMany('App.AlertGroup'),
   reporting: DS.hasMany('App.AlertReportDefinition'),
   parameters: DS.hasMany('App.AlertDefinitionParameter'),
-  lastTriggered: DS.attr('number'),
-  lastTriggeredRaw: DS.attr('number'),
+  lastTriggered: 0,
+  lastTriggeredRaw: 0,
 
   //relates only to SCRIPT-type alert definition
   location: DS.attr('string'),
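Demoting `lastTriggered`/`lastTriggeredRaw` from `DS.attr('number')` to plain properties with a `0` default takes them out of Ember Data's attribute handling, presumably so the alert mappers can refresh these fast-changing timestamps with direct `set()` calls instead of pushing new payloads through the store on every poll. A hypothetical usage sketch, with made-up payload field names:

// Plain properties can be updated in place on the loaded record:
var definition = App.AlertDefinition.find(alertId);          // alertId: hypothetical
definition.set('lastTriggered', json.latest_timestamp);      // payload field names
definition.set('lastTriggeredRaw', json.original_timestamp); // are invented here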

+ 1 - 0
ambari-web/app/views.js

@@ -220,6 +220,7 @@ require('views/main/dashboard/widgets/namenode_heap');
 require('views/main/dashboard/widgets/namenode_cpu');
 require('views/main/dashboard/widgets/hdfs_capacity');
 require('views/main/dashboard/widgets/datanode_live');
+require('views/main/dashboard/widgets/hawqsegment_live');
 require('views/main/dashboard/widgets/namenode_rpc');
 require('views/main/dashboard/widgets/metrics_memory');
 require('views/main/dashboard/widgets/metrics_network');

+ 5 - 6
ambari-web/app/views/common/configs/widgets/list_config_widget_view.js

@@ -288,16 +288,15 @@ App.ListConfigWidgetView = App.ConfigWidgetView.extend({
   },
 
   isOptionExist: function(value) {
-    var isExist = false;
-    if (value !== null && value !== undefined) {
+    var isExist = true;
+    if (Em.isNone(value)) {
+      return !isExist;
+    } else {
       value = Em.typeOf(value) == 'string' ? value.split(',') : value;
       value.forEach(function(item) {
-        isExist = this.get('options').mapProperty('value').contains(item);
+        isExist = isExist && this.get('options').mapProperty('value').contains(item);
       }, this);
       return isExist;
-    } else {
-      return false;
     }
   }
-
 });
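This rewrite fixes a real bug rather than just restyling: the old loop overwrote `isExist` on every iteration, so for a comma-separated value only the last item was actually checked against the widget's options; the new version AND-accumulates and routes `null`/`undefined` through `Em.isNone`. A standalone sketch of the behavioral difference, with a plain array standing in for `options.mapProperty('value')`:

// Hypothetical option list standing in for the widget's options property.
var options = ['queue1', 'queue2'];

// Old behavior: isExist is overwritten each pass, so only the last item
// decides the result -- 'bogus,queue2' wrongly passes.
function isOptionExistOld(value) {
  var isExist = false;
  value.split(',').forEach(function (item) {
    isExist = options.indexOf(item) !== -1;
  });
  return isExist;
}

// New behavior: AND-accumulate, so every item must be a known option.
function isOptionExistNew(value) {
  var isExist = true;
  value.split(',').forEach(function (item) {
    isExist = isExist && options.indexOf(item) !== -1;
  });
  return isExist;
}

console.log(isOptionExistOld('bogus,queue2')); // true  (the bug)
console.log(isOptionExistNew('bogus,queue2')); // false (fixed)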

+ 15 - 4
ambari-web/app/views/main/dashboard/widgets.js

@@ -128,7 +128,8 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
       '13', '12', '14', '16', //hbase
       '17', '18', '19', '20', '23', // all yarn
       '21', // storm
-      '22' // flume
+      '22', // flume
+      '24' // hawq
     ]; // all in order
     var hiddenFull = [
       ['15', 'Region In Transition']
@@ -173,6 +174,12 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
         visibleFull = visibleFull.without(item);
       }, this);
     }
+    if (this.get('hawq_model') == null) {
+      var hawq = ['24'];
+      hawq.forEach(function (item) {
+        visibleFull = visibleFull.without(item);
+      }, this);
+    }
     var obj = this.get('initPrefObject');
     obj.set('visible', visibleFull);
     obj.set('hidden', hiddenFull);
@@ -192,6 +199,8 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
 
   flume_model: null,
 
+  hawq_model: null,
+
   /**
    * List of visible widgets
    * @type {Ember.Enumerable}
@@ -383,7 +392,8 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
       hbase_model: ['12', '13', '14', '15', '16'],
       yarn_model: ['17', '18', '19', '20', '23'],
       storm_model: ['21'],
-      flume_model: ['22']
+      flume_model: ['22'],
+      hawq_model: ['24']
     };
 
     // check each service, find out the newly added service and already deleted service
@@ -450,7 +460,8 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
       '20': App.YARNMemoryPieChartView,
       '21': App.SuperVisorUpView,
       '22': App.FlumeAgentUpView,
-      '23': App.YARNLinksView
+      '23': App.YARNLinksView,
+      '24': App.HawqSegmentUpView
     }, id);
   },
 
@@ -467,7 +478,7 @@ App.MainDashboardWidgetsView = Em.View.extend(App.UserPref, App.LocalStorage, Ap
     visible: [],
     hidden: [],
     threshold: {1: [80, 90], 2: [85, 95], 3: [90, 95], 4: [80, 90], 5: [1000, 3000], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: [], 13: [70, 90], 14: [150, 250], 15: [3, 10], 16: [],
-      17: [70, 90], 18: [], 19: [50, 75], 20: [50, 75], 21: [85, 95], 22: [85, 95], 23: []} // id:[thresh1, thresh2]
+      17: [70, 90], 18: [], 19: [50, 75], 20: [50, 75], 21: [85, 95], 22: [85, 95], 23: [], 24: [80, 90]} // id:[thresh1, thresh2]
   }),
 
   /**
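Read together, these `widgets.js` hunks are the whole checklist for registering a dashboard widget: an id appended to the default `visibleFull` ordering, a `hawq_model` property plus an entry in the service-to-widget-ids map (so the widget is dropped when HAWQ is not installed), an id-to-view-class mapping, and a default threshold pair. Condensed into one illustrative sketch (variable names hypothetical, id '25' invented):

// The four touch points a new widget needs in this view:
visibleFull.push('25');                      // 1. default order/visibility
serviceToWidgets.foo_model = ['25'];         // 2. hide widget if service absent
idToViewClass['25'] = App.FooWidgetView;     // 3. resolve id to a view class
initPrefObject.threshold['25'] = [80, 90];   // 4. default [thresh1, thresh2]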

+ 190 - 0
ambari-web/app/views/main/dashboard/widgets/hawqsegment_live.js

@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+App.HawqSegmentUpView = App.TextDashboardWidgetView.extend({
+
+  title: Em.I18n.t('dashboard.widgets.HawqSegmentUp'),
+  id: '24',
+
+  isPieChart: false,
+  isText: true,
+  isProgressBar: false,
+  model_type: 'hawq',
+
+  hiddenInfo: function () {
+    var result = [];
+    result.pushObject(this.get('hawqSegmentsStarted') + ' ' + Em.I18n.t('dashboard.services.hawq.segments.started'));
+    result.pushObject(this.get('hawqSegmentsInstalled') + ' ' + Em.I18n.t('dashboard.services.hawq.segments.stopped'));
+    result.pushObject(this.get('hawqSegmentsTotal')+ ' ' + Em.I18n.t('dashboard.services.hawq.segments.total'));
+    return result;
+  }.property('hawqSegmentsStarted', 'hawqSegmentsInstalled', 'hawqSegmentsTotal'),
+  hiddenInfoClass: "hidden-info-three-line",
+
+  thresh1: 40,
+  thresh2: 70,
+  maxValue: 100,
+
+  hawqSegmentsStarted: function () {
+    if (Em.isNone(this.get('model.hawqSegmentsStarted'))) {
+      return Em.I18n.t('services.service.summary.notAvailable');
+    }
+    return this.get('model.hawqSegmentsStarted');
+  }.property('model.hawqSegmentsStarted'),
+
+  hawqSegmentsInstalled: function () {
+    if (Em.isNone(this.get('model.hawqSegmentsInstalled'))) {
+      return Em.I18n.t('services.service.summary.notAvailable');
+    }
+    return this.get('model.hawqSegmentsInstalled');
+  }.property('model.hawqSegmentsInstalled'),
+
+  hawqSegmentsTotal: function () {
+    if (Em.isNone(this.get('model.hawqSegmentsTotal'))) {
+      return Em.I18n.t('services.service.summary.notAvailable');
+    }
+    return this.get('model.hawqSegmentsTotal');
+  }.property('model.hawqSegmentsTotal'),
+
+  data: function () {
+    if (Em.isNone(this.get('model.hawqSegmentsStarted')) || Em.isNone(this.get('model.hawqSegmentsTotal'))) {
+      return null;
+    } else {
+      return ((this.get('hawqSegmentsStarted') / this.get('model.hawqSegmentsTotal')).toFixed(2)) * 100;
+    }
+  }.property('model.hawqSegmentsTotal', 'hawqSegmentsStarted'),
+
+  content: function () {
+    if (Em.isNone(this.get('model.hawqSegmentsStarted')) || Em.isNone(this.get('model.hawqSegmentsTotal'))) {
+      return Em.I18n.t('services.service.summary.notAvailable');
+    } else {
+      return this.get('hawqSegmentsStarted') + "/" + this.get('model.hawqSegmentsTotal');
+    }
+  }.property('model.hawqSegmentsTotal', 'hawqSegmentsStarted'),
+
+  editWidget: function (event) {
+    var parent = this;
+    var max_tmp =  parseFloat(parent.get('maxValue'));
+    var configObj = Ember.Object.create({
+      thresh1: parent.get('thresh1') + '',
+      thresh2: parent.get('thresh2') + '',
+      hintInfo: Em.I18n.t('dashboard.widgets.hintInfo.hint1').format(max_tmp),
+      isThresh1Error: false,
+      isThresh2Error: false,
+      errorMessage1: "",
+      errorMessage2: "",
+      maxValue: max_tmp,
+      observeNewThresholdValue: function () {
+        var thresh1 = this.get('thresh1');
+        var thresh2 = this.get('thresh2');
+        if (thresh1.trim() != "") {
+          if (isNaN(thresh1) || thresh1 > max_tmp || thresh1 < 0){
+            this.set('isThresh1Error', true);
+            this.set('errorMessage1', 'Invalid! Enter a number between 0 - ' + max_tmp);
+          } else if ( this.get('isThresh2Error') === false && parseFloat(thresh2)<= parseFloat(thresh1)) {
+            this.set('isThresh1Error', true);
+            this.set('errorMessage1', 'Threshold 1 should be smaller than threshold 2 !');
+          } else {
+            this.set('isThresh1Error', false);
+            this.set('errorMessage1', '');
+          }
+        } else {
+          this.set('isThresh1Error', true);
+          this.set('errorMessage1', 'This is required');
+        }
+
+        if (thresh2.trim() != "") {
+          if (isNaN(thresh2) || thresh2 > max_tmp || thresh2 < 0) {
+            this.set('isThresh2Error', true);
+            this.set('errorMessage2', 'Invalid! Enter a number between 0 - ' + max_tmp);
+          } else {
+            this.set('isThresh2Error', false);
+            this.set('errorMessage2', '');
+          }
+        } else {
+          this.set('isThresh2Error', true);
+          this.set('errorMessage2', 'This is required');
+        }
+
+        // update the slider handles and color
+        if (this.get('isThresh1Error') === false && this.get('isThresh2Error') === false) {
+          $("#slider-range").slider('values', 0 , parseFloat(thresh1));
+          $("#slider-range").slider('values', 1 , parseFloat(thresh2));
+        }
+      }.observes('thresh1', 'thresh2')
+
+    });
+
+    var browserVerion = this.getInternetExplorerVersion();
+    App.ModalPopup.show({
+      header: Em.I18n.t('dashboard.widgets.popupHeader'),
+      classNames: [ 'sixty-percent-width-modal-edit-widget'],
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/main/dashboard/edit_widget_popup'),
+        configPropertyObj: configObj
+      }),
+      primary: Em.I18n.t('common.apply'),
+      onPrimary: function () {
+        configObj.observeNewThresholdValue();
+        if (!configObj.isThresh1Error && !configObj.isThresh2Error) {
+          parent.set('thresh1', parseFloat(configObj.get('thresh1')) );
+          parent.set('thresh2', parseFloat(configObj.get('thresh2')) );
+          if (!App.get('testMode')) {
+            var big_parent = parent.get('parentView');
+            big_parent.getUserPref(big_parent.get('persistKey'));
+            var oldValue = big_parent.get('currentPrefObject');
+            oldValue.threshold[parseInt(parent.id)] = [configObj.get('thresh1'), configObj.get('thresh2')];
+            big_parent.postUserPref(big_parent.get('persistKey'),oldValue);
+          }
+          this.hide();
+        }
+      },
+
+      didInsertElement: function () {
+        var handlers = [configObj.get('thresh1'), configObj.get('thresh2')];
+        var colors = [App.healthStatusRed, App.healthStatusOrange, App.healthStatusGreen]; //color red, orange, green
+
+        if (browserVerion == -1 || browserVerion > 9) {
+          configObj.set('isIE9', false);
+          configObj.set('isGreenOrangeRed', false);
+          $("#slider-range").slider({
+            range: true,
+            min: 0,
+            max: max_tmp,
+            values: handlers,
+            create: function (event, ui) {
+              parent.updateColors(handlers, colors);
+            },
+            slide: function (event, ui) {
+              parent.updateColors(ui.values, colors);
+              configObj.set('thresh1', ui.values[0] + '');
+              configObj.set('thresh2', ui.values[1] + '');
+            },
+            change: function (event, ui) {
+              parent.updateColors(ui.values, colors);
+            }
+          });
+        } else {
+          configObj.set('isIE9', true);
+          configObj.set('isGreenOrangeRed', false);
+        }
+      }
+    });
+  }
+});
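One subtlety in the new widget's `data` property: `toFixed(2)` returns a string, which `* 100` silently coerces back to a number, so the started/total ratio is rounded to two decimals before scaling, and the multiply can reintroduce floating-point noise. A quick illustration:

// 7 of 100 segments started:
((7 / 100).toFixed(2)) * 100;   // 7.000000000000001 -- '0.07' coerced, float noise
Math.round(7 / 100 * 100);      // 7 -- a near-equivalent, arguably clearer form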

+ 1 - 1
ambari-web/test/controllers/installer_test.js

@@ -36,7 +36,7 @@ describe('App.InstallerController', function () {
       c = App.InstallerController.create({});
     });
     it('all steps are disabled by default', function () {
-      expect(c.get('isStepDisabled.length') > 0 ).to.be.ok;
+      expect(c.get('isStepDisabled.length')).to.be.above(0);
       expect(c.get('isStepDisabled').everyProperty('value', true)).to.be.ok;
     });
   });

+ 2 - 2
ambari-web/test/controllers/main/admin/highAvailability/progress_popup_controller_test.js

@@ -46,11 +46,11 @@ describe('App.HighAvailabilityProgressPopupController', function () {
       });
 
       it('taskInfo.id = 2', function () {
-        expect(controller.get('taskInfo.id'), 2);
+        expect(controller.get('taskInfo.id')).to.be.equal(2);
       });
 
       it('taskInfo.requestId = 1', function () {
-        expect(controller.get('taskInfo.requestId'), 1);
+        expect(controller.get('taskInfo.requestId')).to.be.equal(1);
       });
 
       it('App.updater.run is called once', function () {
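These two fixes set the pattern for most of the test changes that follow, and for the tests deleted outright later in this commit: Chai's `expect(value, message)` asserts nothing by itself; it only fails once a matcher such as `.to.be.equal()` is chained on. So `expect(controller.get('taskInfo.id'), 2)` passed unconditionally, as does a bare `expect(spy.calledWith(...))`. A minimal illustration:

var expect = require('chai').expect;

expect(1, 2);              // passes: the second argument is just a message
expect(false);             // passes: no matcher, nothing is checked
expect(1).to.be.equal(2);  // AssertionError -- the actual assertion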

+ 2 - 6
ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js

@@ -1020,23 +1020,19 @@ describe('App.ManageAlertNotificationsController', function () {
       describe('#errorHandler', function () {
 
         it('should fire invalid name', function () {
-
           view.set('controller.newCustomProperty.name', '!!');
           view.errorsHandler();
           expect(view.get('isError')).to.be.true;
           expect(view.get('parentView.disablePrimary')).to.be.true;
-          expect(view.get('errorMessage.length') > 0).to.be.true;
-
+          expect(view.get('errorMessage.length')).to.be.above(0);
         });
 
         it('should fire existing property name', function () {
-
           view.set('controller.newCustomProperty.name', 'n1');
           view.errorsHandler();
           expect(view.get('isError')).to.be.true;
           expect(view.get('parentView.disablePrimary')).to.be.true;
-          expect(view.get('errorMessage.length') > 0).to.be.true;
-
+          expect(view.get('errorMessage.length')).to.be.above(0);
         });
 
       });

+ 1 - 1
ambari-web/test/controllers/main/service/info/config_test.js

@@ -478,7 +478,7 @@ describe("App.MainServiceInfoConfigsController", function () {
     });
     it("ajax request to put cluster cfg", function () {
       mainServiceInfoConfigsController.set('stepConfigs', sc);
-      expect(mainServiceInfoConfigsController.putChangedConfigurations([]));
+      mainServiceInfoConfigsController.putChangedConfigurations([]);
       var args = testHelpers.findAjaxRequest('name', 'common.across.services.configurations');
       expect(args[0]).exists;
     });

+ 1 - 1
ambari-web/test/controllers/main/service/widgets/create/step2_controller_test.js

@@ -731,7 +731,7 @@ describe('App.WidgetWizardStep2Controller', function () {
     });
     it("user is moved to the next step", function () {
       controller.next();
-      expect(App.router.send.calledWith('next'));
+      expect(App.router.send.calledWith('next')).to.be.true;
     });
   });
 });

+ 0 - 12
ambari-web/test/controllers/main/service_test.js

@@ -178,12 +178,6 @@ describe('App.MainServiceController', function () {
       expect(r).to.be.null;
     });
 
-    it('nothing disabled', function() {
-      var event = {target: {}}, query = 'query';
-      mainServiceController.startAllService(event).onPrimary(query);
-      expect(mainServiceController.allServicesCall.calledWith('STARTED', query));
-    });
-
   });
 
   describe('#stopAllService', function() {
@@ -208,12 +202,6 @@ describe('App.MainServiceController', function () {
       expect(r).to.be.null;
     });
 
-    it('nothing disabled', function() {
-      var event = {target: {}}, query = 'query';
-      mainServiceController.stopAllService(event).onPrimary(query);
-      expect(mainServiceController.allServicesCall.calledWith('STARTED', query));
-    });
-
   });
 
   describe('#startStopAllService', function() {

+ 0 - 18
ambari-web/test/controllers/wizard/step3_test.js

@@ -380,24 +380,6 @@ describe('App.WizardStep3Controller', function () {
 
   });
 
-  describe('#removeHost', function () {
-
-    beforeEach(function () {
-      sinon.stub(c, 'removeHosts', Em.K);
-    });
-
-    afterEach(function () {
-      c.removeHosts.restore();
-    });
-
-    it('should call removeHosts with array as arg', function () {
-      var host = {a: ''};
-      c.removeHost(host);
-      expect(c.removeHosts.calledWith([host]));
-    });
-
-  });
-
   describe('#removeSelectedHosts', function () {
 
     beforeEach(function () {

+ 1 - 1
ambari-web/test/controllers/wizard/step7_test.js

@@ -1733,7 +1733,7 @@ describe('App.InstallerStep7Controller', function () {
     it('should copy properties from hdfs-site to hdfs-client for HAWQ', function() {
       installerStep7Controller.addHawqConfigsOnNnHa(configs);
       // ensure 6 new configs were added
-      expect(configs.length - oldConfigs.length).to.be.equal(6);
+      expect(configs.length).to.be.equal(oldConfigs.length + 6);
     });
 
     describe('find the same property in hdfs-client for HAWQ and see if attribute value matches with the corresponding property\'s attribute value in hdfs-site', function () {

+ 6 - 1
ambari-web/test/controllers/wizard/step9_test.js

@@ -974,7 +974,12 @@ describe('App.InstallerStep9Controller', function () {
     tests.forEach(function (test) {
       it(test.m, function () {
         c.onInProgressPerHost(test.actions, test.host);
-        expect(test.host.message === test.e.message).to.equal(test.e.b);
+        if (test.e.b) {
+          expect(test.host.message).to.be.equal(test.e.message);
+        }
+        else {
+          expect(test.host.message).to.be.not.equal(test.e.message);
+        }
       });
     });
   });

+ 4 - 4
ambari-web/test/mappers/server_data_mapper_test.js

@@ -113,11 +113,11 @@ describe('App.QuickDataMapper', function () {
     });
 
     it('numeric array. element doesn\'t exists', function () {
-      expect(mapper.binaryIndexOf(array1, 0) < 0).to.be.true;
+      expect(mapper.binaryIndexOf(array1, 0)).to.be.below(0);
     });
 
     it('numeric array. element doesn\'t exists 2', function () {
-      expect(mapper.binaryIndexOf(array1, 10) < 0).to.be.true;
+      expect(mapper.binaryIndexOf(array1, 10)).to.be.below(0);
     });
 
     array2.forEach(function(item, index) {
@@ -127,11 +127,11 @@ describe('App.QuickDataMapper', function () {
     });
 
     it('string array. element doesn\'t exists', function () {
-      expect(mapper.binaryIndexOf(array2, 'a') < 0).to.be.true;
+      expect(mapper.binaryIndexOf(array2, 'a')).to.be.below(0);
     });
 
     it('string array. element doesn\'t exists 2', function () {
-      expect(mapper.binaryIndexOf(array2, 'q') < 0).to.be.true;
+      expect(mapper.binaryIndexOf(array2, 'q')).to.be.below(0);
     });
 
   });

+ 5 - 5
ambari-web/test/mixins/common/configs/configs_saver_test.js

@@ -59,14 +59,14 @@ describe('App.ConfigsSaverMixin', function() {
       currentServices: stackServices[2],
       res: false,
       m: 'not ok'
-    }].forEach(function (c) {
+    }].forEach(function (c, index) {
         describe(c.m, function () {
           beforeEach(function () {
             instanceObject.reopen({
               currentServices: c.currentServices
             });
-            it('', function () {
-              expect(instanceObject.allowSaveCoreSite()).to.equal(c.res);
+            it('test #' + index, function () {
+              expect(instanceObject.allowSaveCoreSite()).to.be.equal(c.res);
             });
           });
         });
@@ -79,7 +79,7 @@ describe('App.ConfigsSaverMixin', function() {
       { fName: 'core-site', res: true, allowSaveCoreSite: true, m: 'core site is allowed to be saved' },
       { fName: 'core-site', res: false, allowSaveCoreSite: false, m: 'core site is not allowed to be saved' },
       { fName: 'other-file-name', res: true, m: 'file name has not restriction rule, so can be saved' }
-    ].forEach(function (c) {
+    ].forEach(function (c, index) {
         describe(c.m, function () {
           beforeEach(function() {
             sinon.stub(instanceObject, 'allowSaveCoreSite').returns(c.allowSaveCoreSite);
@@ -87,7 +87,7 @@ describe('App.ConfigsSaverMixin', function() {
           afterEach(function() {
             instanceObject.allowSaveCoreSite.restore();
           });
-          it('', function () {
+          it('test #' + index, function () {
             expect(instanceObject.allowSaveSite(c.fName)).to.equal(c.res);
           });
         });

+ 1 - 1
ambari-web/test/mixins/main/host/details/host_components/install_component_test.js

@@ -109,7 +109,7 @@ describe('App.InstallComponent', function () {
 
     it("App.ajax.defaultErrorHandler should be called", function() {
       installComponent.ajaxErrorCallback({}, {}, 'error', {method: 'method1', url: 'url1'}, {});
-      expect(App.ajax.defaultErrorHandler.calledWith({}, 'url1', 'method1'));
+      expect(App.ajax.defaultErrorHandler.calledWith({}, 'url1', 'method1')).to.be.true;
     });
   });
 

+ 1 - 2
ambari-web/test/models/alerts/alert_instance_test.js

@@ -51,8 +51,7 @@ describe('App.AlertInstance', function () {
         lastTriggeredFormatted: lastTriggeredFormatted
       });
       var status = model.get('statusChangedAndLastCheckedFormatted');
-      expect(status.indexOf(lastCheckedFormatted) > status.indexOf(lastTriggeredFormatted)).to.be.true;
-
+      expect(status.indexOf(lastCheckedFormatted)).to.be.above(status.indexOf(lastTriggeredFormatted));
     });
 
   });

+ 1 - 1
ambari-web/test/utils/form_field_test.js

@@ -34,7 +34,7 @@ describe('App.FormField', function () {
       formField.set('value', '');
       formField.set('isRequired', true);
       formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
+      expect(formField.get('errorMessage')).to.be.not.equal('');
     });
     /*REQUIRE END*/
 

+ 2 - 2
ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js

@@ -127,8 +127,8 @@ describe('App.SliderConfigWidgetView', function () {
 
   describe('#mirrorValue', function () {
     it('should be equal to config.value after init', function () {
-      expect('' + viewInt.get('mirrorValue')).to.equal(viewInt.get('config.value'));
-      expect('' + viewFloat.get('mirrorValue')).to.equal(viewFloat.get('config.value'));
+      expect(viewInt.get('mirrorValue').toString()).to.be.equal(viewInt.get('config.value'));
+      expect(viewFloat.get('mirrorValue').toString()).to.be.equal(viewFloat.get('config.value'));
     });
 
     it('should be converted according to widget format', function() {

+ 1 - 1
ambari-web/test/views/common/table_view_test.js

@@ -50,7 +50,7 @@ describe('App.TableView', function () {
 
     it('should set filterConditions on instance', function() {
       var tableView = App.TableView.create();
-      expect(tableView.get('filterConditions') === App.TableView.prototype.filterConditions).to.be.false;
+      expect(tableView.get('filterConditions')).to.be.not.equal(App.TableView.prototype.filterConditions);
     });
 
   });

+ 4 - 2
ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js

@@ -73,14 +73,16 @@ describe('App.MainAdminStackServicesView', function () {
     it("routes to Add Service Wizard and set redirect path on wizard close", function() {
       isAccessibleMock.returns(true);
       view.goToAddService({context: "serviceName"});
-      expect(App.router.get.calledWith('addServiceController') && mock.setDBProperty.calledWith('onClosePath', 'main.admin.stackAndUpgrade.services')).to.be.true;
+      expect(App.router.get.calledWith('addServiceController')).to.be.true;
+      expect(mock.setDBProperty.calledWith('onClosePath', 'main.admin.stackAndUpgrade.services')).to.be.true;
       expect(App.get('router').transitionTo.calledWith('main.serviceAdd')).to.be.true;
       expect(mock.get('serviceToInstall')).to.be.equal("serviceName");
     });
     it("routes to Security Wizard", function() {
       isAccessibleMock.returns(true);
       view.goToAddService({context: "KERBEROS"});
-      expect(App.router.get.calledWith('kerberosWizardController') && mock.setDBProperty.calledWith('onClosePath', 'main.admin.stackAndUpgrade.services')).to.be.true;
+      expect(App.router.get.calledWith('kerberosWizardController')).to.be.true;
+      expect(mock.setDBProperty.calledWith('onClosePath', 'main.admin.stackAndUpgrade.services')).to.be.true;
       expect(mock.checkAndStartKerberosWizard.calledOnce).to.be.true;
     });
   });

+ 2 - 2
ambari-web/test/views/main/alert_definitions_view_test.js

@@ -51,8 +51,8 @@ describe('App.MainAlertDefinitionsView', function () {
     it('Add Ambari service to filters', function () {
       var serviceFilterClass = view.serviceFilterView;
       var content = serviceFilterClass.create({}).get('content');
-      expect(content[0].label === Em.I18n.t('common.all'));
-      expect(content[content.length - 1].label === Em.I18n.t('app.name'));
+      expect(content[0].label).to.be.equal(Em.I18n.t('common.all'));
+      expect(content[content.length - 1].label).to.be.equal(Em.I18n.t('app.name'));
     });
   });
 

+ 10 - 8
ambari-web/test/views/main/dashboard/widget_test.js

@@ -103,20 +103,22 @@ describe('App.DashboardWidgetView', function () {
       dashboardWidgetView.get('parentView').translateToReal.restore();
     });
     it("postUserPref is called with correct data", function () {
-      expect(dashboardWidgetView.get('parentView').postUserPref.calledWith('key', {
+      var arg = JSON.parse(JSON.stringify(dashboardWidgetView.get('parentView').postUserPref.args[0][1]));
+      expect(arg).to.be.eql({
         dashboardVersion: 'new',
-        visible: ['2'],
-        hidden: ['1'],
+        visible: ['1', '2'],
+        hidden: [[5, null]],
         threshold: 'threshold'
-      }));
+      });
     });
     it("translateToReal is called with valid data", function () {
-      expect(dashboardWidgetView.get('parentView').translateToReal.calledWith({
+      var arg = JSON.parse(JSON.stringify(dashboardWidgetView.get('parentView').translateToReal.args[0][0]));
+      expect(arg).to.be.eql({
         dashboardVersion: 'new',
-        visible: ['2'],
-        hidden: ['1'],
+        visible: ['1', '2'],
+        hidden: [[5, null]],
         threshold: 'threshold'
-      }));
+      });
     });
   });
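The `JSON.parse(JSON.stringify(...))` round-trip here is doing real work: serializing the spy's argument strips Ember prototypes and metadata down to plain data so `eql` can compare structurally, replacing a `calledWith` wrapped in a bare `expect` that asserted nothing. Tellingly, once the assertion became real, the expected values themselves had to change (`visible: ['1', '2']`, `hidden: [[5, null]]`), confirming the old test never compared anything. A runnable sketch of the pattern:

var sinon = require('sinon');
var expect = require('chai').expect;

var spy = sinon.spy();
spy({dashboardVersion: 'new', visible: ['1', '2']});

// Round-trip the first call's first argument to plain JSON, then deep-compare:
var arg = JSON.parse(JSON.stringify(spy.args[0][0]));
expect(arg).to.be.eql({dashboardVersion: 'new', visible: ['1', '2']});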
 

+ 69 - 0
ambari-web/test/views/main/dashboard/widgets/hawqsegment_live_test.js

@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+require('views/main/dashboard/widget');
+require('views/main/dashboard/widgets/text_widget');
+require('views/main/dashboard/widgets/hawqsegment_live');
+
+describe('App.HawqSegmentUpView', function() {
+
+  var tests = [
+    {
+      data: 100,
+      e: {
+        isRed: false,
+        isOrange: false,
+        isGreen: true
+      }
+    },
+    {
+      data: 0,
+      e: {
+        isRed: true,
+        isOrange: false,
+        isGreen: false
+      }
+    },
+    {
+      data: 50,
+      e: {
+        isRed: false,
+        isOrange: true,
+        isGreen: false
+      }
+    }
+  ];
+
+  tests.forEach(function(test) {
+    describe('data -' + test.data, function() {
+      var hawqSegmentUpView = App.HawqSegmentUpView.create({model_type:null, data: test.data, content: test.data.toString()});
+      it('shows red', function() {
+        expect(hawqSegmentUpView.get('isRed')).to.equal(test.e.isRed);
+      });
+      it('shows orange', function() {
+        expect(hawqSegmentUpView.get('isOrange')).to.equal(test.e.isOrange);
+      });
+      it('shows green', function() {
+        expect(hawqSegmentUpView.get('isGreen')).to.equal(test.e.isGreen);
+      });
+    });
+  });
+
+});

+ 1 - 1
ambari-web/test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test.js

@@ -50,7 +50,7 @@ describe('App.ChartServiceMetricsAMS_RegionServerBaseView', function () {
     });
 
     it('displayName', function () {
-      expect(this.result[0].name === regionServerView.displayName).to.be.true;
+      expect(this.result[0].name).to.be.equal(regionServerView.displayName);
     });
 
     it('data.length', function () {

+ 2 - 2
ambari-web/test/views/wizard/step5_view_test.js

@@ -191,7 +191,7 @@ describe('App.SelectHostView', function() {
 
     it('should call assignHostToMaster', function() {
       view.changeHandler();
-      expect(view.get('controller').assignHostToMaster.calledWith('ZOOKEEPER_SERVER', 'h1', 1));
+      expect(view.get('controller').assignHostToMaster.args[0]).to.be.eql(['ZOOKEEPER_SERVER', 'h1 info', 1]);
     });
 
     it('should increment rebalanceComponentHostsCounter if component it is multiple', function() {
@@ -271,7 +271,7 @@ describe('App.InputHostView', function() {
 
     it('should call assignHostToMaster', function() {
       view.changeHandler();
-      expect(view.get('controller').assignHostToMaster.calledWith('ZOOKEEPER_SERVER', 'h1', 1));
+      expect(view.get('controller').assignHostToMaster.args[0]).to.be.eql(['ZOOKEEPER_SERVER', 'h1', 1]);
     });
 
     it('should increment rebalanceComponentHostsCounter if component it is multiple', function() {

+ 23 - 8
ambari-web/test/views/wizard/step9_view_test.js

@@ -701,14 +701,29 @@ describe('App.HostStatusView', function () {
           e: false
         }
       ]).forEach(function (test) {
-        it(JSON.stringify(test.obj) + ' ' + test.progress, function() {
-          hv.set('barColor', '');
-          hv.set('obj', test.obj);
-          hv.set('obj.message', '');
-          hv.set('controller', {progress: test.progress});
-          hv.onStatus();
-          expect(hv.get('obj.message') === Em.I18n.t('installer.step9.host.status.success')).to.equal(test.e);
-          expect(hv.get('barColor') === 'progress-success').to.equal(test.e);
+        describe(JSON.stringify(test.obj) + ' ' + test.progress, function() {
+          beforeEach(function () {
+            hv.setProperties({
+              barColor: '',
+              obj: test.obj
+            });
+            hv.set('obj.message', '');
+            hv.set('controller', {progress: test.progress});
+            hv.onStatus();
+          });
+
+          if (test.e) {
+            it('completed successful', function () {
+              expect(hv.get('obj.message')).to.be.equal(Em.I18n.t('installer.step9.host.status.success'));
+              expect(hv.get('barColor')).to.be.equal('progress-success');
+            });
+          }
+          else {
+            it('completed not successful', function () {
+              expect(hv.get('obj.message')).to.be.not.equal(Em.I18n.t('installer.step9.host.status.success'));
+              expect(hv.get('barColor')).to.be.not.equal('progress-success');
+            });
+          }
         });
       });
   });
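The restructuring above trades one boolean-comparison `it` for a `describe` with a `beforeEach` and a branch-specific `it`, which changes what a failure reports: comparing booleans collapses every mismatch into `expected false to equal true`, while asserting on the value shows both sides. Illustrative failure output (messages approximate):

expect(hv.get('obj.message') === expected).to.equal(true);
// AssertionError: expected false to equal true               (old style)
expect(hv.get('obj.message')).to.be.equal(expected);
// AssertionError: expected 'In Progress' to equal 'Success'  (new style)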