Selaa lähdekoodia

Merge branch 'trunk' into branch-alerts-dev

Jonathan Hurley 11 vuotta sitten
vanhempi
commit
090ed5143c
33 muutettua tiedostoa jossa 1613 lisäystä ja 184 poistoa
  1. 5 0
      ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
  2. 177 7
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
  3. 42 11
      ambari-server/src/main/python/ambari-server.py
  4. 4 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
  5. 2 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py
  6. 0 6
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
  7. 892 14
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
  8. 69 3
      ambari-server/src/test/python/TestAmbariServer.py
  9. 2 2
      ambari-web/app/messages.js
  10. 6 2
      ambari-web/app/styles/application.less
  11. 63 61
      ambari-web/app/templates/common/configs/config_history_flow.hbs
  12. 2 1
      ambari-web/app/templates/common/configs/service_version_box.hbs
  13. 13 1
      ambari-web/app/views/common/configs/config_history_flow.js
  14. 7 3
      ambari-web/app/views/main/host.js
  15. 17 3
      contrib/views/slider/docs/index.md
  16. 1 0
      contrib/views/slider/src/main/java/org/apache/ambari/view/slider/SliderAppsViewController.java
  17. 1 1
      contrib/views/slider/src/main/java/org/apache/ambari/view/slider/SliderAppsViewControllerImpl.java
  18. 1 1
      contrib/views/slider/src/main/java/org/apache/ambari/view/slider/rest/client/SliderAppMasterClient.java
  19. 1 1
      contrib/views/slider/src/main/resources/ui/app/assets/data/apps/apps.json
  20. 1 0
      contrib/views/slider/src/main/resources/ui/app/assets/data/resource/slider-properties.json
  21. 90 49
      contrib/views/slider/src/main/resources/ui/app/controllers/slider_app_controller.js
  22. 7 2
      contrib/views/slider/src/main/resources/ui/app/mappers/slider_apps_mapper.js
  23. 15 1
      contrib/views/slider/src/main/resources/ui/app/models/slider_app_component.js
  24. 11 10
      contrib/views/slider/src/main/resources/ui/app/routes/main.js
  25. 32 0
      contrib/views/slider/src/main/resources/ui/app/styles/application.less
  26. 8 1
      contrib/views/slider/src/main/resources/ui/app/templates/slider_app.hbs
  27. 20 0
      contrib/views/slider/src/main/resources/ui/app/templates/slider_app/destroy/destroy_popup.hbs
  28. 28 0
      contrib/views/slider/src/main/resources/ui/app/templates/slider_app/destroy/destroy_popup_footer.hbs
  29. 7 1
      contrib/views/slider/src/main/resources/ui/app/templates/slider_app/summary.hbs
  30. 5 0
      contrib/views/slider/src/main/resources/ui/app/translations.js
  31. 41 0
      contrib/views/slider/src/main/resources/ui/app/views/slider_app/destroy_modal_footer_view.js
  32. 37 0
      contrib/views/slider/src/main/resources/ui/app/views/slider_app/destroy_popup_view.js
  33. 6 1
      contrib/views/slider/src/main/resources/view.xml

+ 5 - 0
ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css

@@ -334,6 +334,11 @@
   width: 14px;
   width: 14px;
 }
 }
 
 
+.groups-pane table .search-container .namefilter,
+.users-pane table .search-container .namefilter {
+  font-weight: normal;
+}
+
 .settings-edit-toggle.disabled, .properties-toggle.disabled{
 .settings-edit-toggle.disabled, .properties-toggle.disabled{
   color: #999;
   color: #999;
   cursor: not-allowed;
   cursor: not-allowed;

+ 177 - 7
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -22,6 +22,8 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.Pattern;
@@ -96,7 +98,7 @@ public class BlueprintConfigurationProcessor {
    * @return  updated properties
    * @return  updated properties
    */
    */
   public Map<String, Map<String, String>> doUpdateForClusterCreate(Map<String, ? extends HostGroup> hostGroups) {
   public Map<String, Map<String, String>> doUpdateForClusterCreate(Map<String, ? extends HostGroup> hostGroups) {
-    for (Map<String, Map<String, PropertyUpdater>> updaterMap : allUpdaters) {
+    for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
         String type = entry.getKey();
         String type = entry.getKey();
         for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) {
         for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) {
@@ -114,6 +116,43 @@ public class BlueprintConfigurationProcessor {
     return properties;
     return properties;
   }
   }
 
 
+  /**
+   * Creates a Collection of PropertyUpdater maps that will handle the configuration
+   *   update for this cluster.  If NameNode HA is enabled, then updater
+   *   instances will be added to the collection, in addition to the default list
+   *   of Updaters that are statically defined.
+   *
+   * @return Collection of PropertyUpdater maps used to handle cluster config update
+   */
+  private Collection<Map<String, Map<String, PropertyUpdater>>> createCollectionOfUpdaters() {
+    return (isNameNodeHAEnabled()) ? addHAUpdaters(allUpdaters) : allUpdaters;
+  }
+
+  /**
+   * Creates a Collection of PropertyUpdater maps that include the NameNode HA properties, and
+   *   adds these to the list of updaters used to process the cluster configuration.  The HA
+   *   properties are based on the names of the HA nameservices and name nodes, and so must
+   *   be registered at runtime, rather than in the static list.  This new Collection includes
+   *   the statically-defined updaters, in addition to the HA-related updaters.
+   *
+   * @param updaters a Collection of updater maps to be included in the list of updaters for
+   *                   this cluster config update
+   * @return A Collection of PropertyUpdater maps to handle the cluster config update
+   */
+  private Collection<Map<String, Map<String, PropertyUpdater>>> addHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
+    Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
+      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+
+    // always add the statically-defined list of updaters to the list to use
+    // in processing cluster configuration
+    highAvailabilityUpdaters.addAll(updaters);
+
+    // add the updaters for the dynamic HA properties, based on the HA config in hdfs-site
+    highAvailabilityUpdaters.add(createMapOfHAUpdaters());
+
+    return highAvailabilityUpdaters;
+  }
+
   /**
   /**
    * Update properties for blueprint export.
    * Update properties for blueprint export.
    * This involves converting concrete topology information to host groups.
    * This involves converting concrete topology information to host groups.
@@ -125,11 +164,100 @@ public class BlueprintConfigurationProcessor {
   public Map<String, Map<String, String>> doUpdateForBlueprintExport(Collection<? extends HostGroup> hostGroups) {
   public Map<String, Map<String, String>> doUpdateForBlueprintExport(Collection<? extends HostGroup> hostGroups) {
     doSingleHostExportUpdate(hostGroups, singleHostTopologyUpdaters);
     doSingleHostExportUpdate(hostGroups, singleHostTopologyUpdaters);
     doSingleHostExportUpdate(hostGroups, dbHostTopologyUpdaters);
     doSingleHostExportUpdate(hostGroups, dbHostTopologyUpdaters);
+
+    if (isNameNodeHAEnabled()) {
+      doNameNodeHAUpdate(hostGroups);
+    }
+
     doMultiHostExportUpdate(hostGroups, multiHostTopologyUpdaters);
     doMultiHostExportUpdate(hostGroups, multiHostTopologyUpdaters);
 
 
     return properties;
     return properties;
   }
   }
 
 
+  /**
+   * Perform export update processing for HA configuration for NameNodes.  The HA NameNode property
+   *   names are based on the nameservices defined when HA is enabled via the Ambari UI, so this method
+   *   dynamically determines the property names, and registers PropertyUpdaters to handle the masking of
+   *   host names in these configuration items.
+   *
+   * @param hostGroups cluster host groups
+   */
+  public void doNameNodeHAUpdate(Collection<? extends HostGroup> hostGroups) {
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = createMapOfHAUpdaters();
+
+    // perform a single host update on these dynamically generated property names
+    if (highAvailabilityUpdaters.get("hdfs-site").size() > 0) {
+      doSingleHostExportUpdate(hostGroups, highAvailabilityUpdaters);
+    }
+  }
+
+  /**
+   * Creates map of PropertyUpdater instances that are associated with
+   *   NameNode High Availability (HA).  The HA configuration property
+   *   names are dynamic, and based on other HA config elements in
+   *   hdfs-site.  This method registers updaters for the required
+   *   properties associated with each nameservice and namenode.
+   *
+   * @return a Map of registered PropertyUpdaters for handling HA properties in hdfs-site
+   */
+  private Map<String, Map<String, PropertyUpdater>> createMapOfHAUpdaters() {
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
+    Map<String, PropertyUpdater> hdfsSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    highAvailabilityUpdaters.put("hdfs-site", hdfsSiteUpdatersForAvailability);
+
+    Map<String, String> hdfsSiteConfig = properties.get("hdfs-site");
+    // generate the property names based on the current HA config for the NameNode deployments
+    for (String nameService : parseNameServices(hdfsSiteConfig)) {
+      for (String nameNode : parseNameNodes(nameService, hdfsSiteConfig)) {
+        final String httpsPropertyName = "dfs.namenode.https-address." + nameService + "." + nameNode;
+        hdfsSiteUpdatersForAvailability.put(httpsPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
+        final String httpPropertyName = "dfs.namenode.http-address." + nameService + "." + nameNode;
+        hdfsSiteUpdatersForAvailability.put(httpPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
+        final String rpcPropertyName = "dfs.namenode.rpc-address." + nameService + "." + nameNode;
+        hdfsSiteUpdatersForAvailability.put(rpcPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
+      }
+    }
+    return highAvailabilityUpdaters;
+  }
+
+  /**
+   * Convenience function to determine if NameNode HA is enabled.
+   *
+   * @return true if NameNode HA is enabled
+   *         false if NameNode HA is not enabled
+   */
+  boolean isNameNodeHAEnabled() {
+    return properties.containsKey("hdfs-site") && properties.get("hdfs-site").containsKey("dfs.nameservices");
+  }
+
+
+  /**
+   * Parses out the list of nameservices associated with this HDFS configuration.
+   *
+   * @param properties config properties for this cluster
+   *
+   * @return array of Strings that indicate the nameservices for this cluster
+   */
+  static String[] parseNameServices(Map<String, String> properties) {
+    final String nameServices = properties.get("dfs.nameservices");
+    return splitAndTrimStrings(nameServices);
+  }
+
+  /**
+   * Parses out the list of name nodes associated with a given HDFS
+   *   NameService, based on a given HDFS configuration.
+   *
+   * @param nameService the nameservice used for this parsing
+   * @param properties config properties for this cluster
+   *
+   * @return array of Strings that indicate the name nodes associated
+   *           with this nameservice
+   */
+  static String[] parseNameNodes(String nameService, Map<String, String> properties) {
+    final String nameNodes = properties.get("dfs.ha.namenodes." + nameService);
+    return splitAndTrimStrings(nameNodes);
+  }
+
   /**
   /**
    * Update single host topology configuration properties for blueprint export.
    * Update single host topology configuration properties for blueprint export.
    *
    *
@@ -274,15 +402,22 @@ public class BlueprintConfigurationProcessor {
     return hosts;
     return hosts;
   }
   }
 
 
-
   /**
   /**
-   * Provides package-level access to the map of single host topology updaters.
-   * This is useful for facilitating unit-testing of this class.
+   * Convenience method for splitting out the HA-related properties, while
+   *   also removing leading/trailing whitespace.
+   *
+   * @param propertyName property name to parse
    *
    *
-   * @return the map of single host topology updaters
+   * @return an array of Strings that represent the comma-separated
+   *         elements in this property
    */
    */
-  static Map<String, Map<String, PropertyUpdater>> getSingleHostTopologyUpdaters() {
-    return singleHostTopologyUpdaters;
+  private static String[] splitAndTrimStrings(String propertyName) {
+    List<String> namesWithoutWhitespace = new LinkedList<String>();
+    for (String service : propertyName.split(",")) {
+      namesWithoutWhitespace.add(service.trim());
+    }
+
+    return namesWithoutWhitespace.toArray(new String[namesWithoutWhitespace.size()]);
   }
   }
 
 
   /**
   /**
@@ -630,13 +765,19 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<String, PropertyUpdater>();
 
 
     Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<String, PropertyUpdater>();
 
 
     Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
 
 
+    Map<String, PropertyUpdater> nagiosEnvMap = new HashMap<String ,PropertyUpdater>();
+
 
 
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
     singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
     singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
@@ -647,6 +788,9 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("oozie-site", oozieSiteMap);
     singleHostTopologyUpdaters.put("oozie-site", oozieSiteMap);
     singleHostTopologyUpdaters.put("storm-site", stormSiteMap);
     singleHostTopologyUpdaters.put("storm-site", stormSiteMap);
     singleHostTopologyUpdaters.put("falcon-startup.properties", falconStartupPropertiesMap);
     singleHostTopologyUpdaters.put("falcon-startup.properties", falconStartupPropertiesMap);
+    singleHostTopologyUpdaters.put("nagios-env", nagiosEnvMap);
+    singleHostTopologyUpdaters.put("hive-env", hiveEnvMap);
+    singleHostTopologyUpdaters.put("oozie-env", oozieEnvMap);
 
 
     mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
     mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
@@ -655,6 +799,8 @@ public class BlueprintConfigurationProcessor {
     multiHostTopologyUpdaters.put("webhcat-site", multiWebhcatSiteMap);
     multiHostTopologyUpdaters.put("webhcat-site", multiWebhcatSiteMap);
     multiHostTopologyUpdaters.put("hbase-site", multiHbaseSiteMap);
     multiHostTopologyUpdaters.put("hbase-site", multiHbaseSiteMap);
     multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap);
     multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap);
+    multiHostTopologyUpdaters.put("core-site", multiCoreSiteMap);
+    multiHostTopologyUpdaters.put("hdfs-site", multiHdfsSiteMap);
 
 
     dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);
     dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);
 
 
@@ -666,6 +812,7 @@ public class BlueprintConfigurationProcessor {
     hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE"));
     hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE"));
     coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE"));
     coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE"));
     hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
     hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
+    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE"));
 
 
     // SECONDARY_NAMENODE
     // SECONDARY_NAMENODE
     hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
     hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
@@ -690,17 +837,34 @@ public class BlueprintConfigurationProcessor {
     yarnSiteMap.put("yarn.resourcemanager.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
     yarnSiteMap.put("yarn.resourcemanager.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
     yarnSiteMap.put("yarn.resourcemanager.admin.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
     yarnSiteMap.put("yarn.resourcemanager.admin.address", new SingleHostTopologyUpdater("RESOURCEMANAGER"));
 
 
+    // APP_TIMELINE_SERVER
+    yarnSiteMap.put("yarn.timeline-service.address", new SingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
+    yarnSiteMap.put("yarn.timeline-service.webapp.address", new SingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
+    yarnSiteMap.put("yarn.timeline-service.webapp.https.address", new SingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
+
+
     // HIVE_SERVER
     // HIVE_SERVER
     hiveSiteMap.put("hive.metastore.uris", new SingleHostTopologyUpdater("HIVE_SERVER"));
     hiveSiteMap.put("hive.metastore.uris", new SingleHostTopologyUpdater("HIVE_SERVER"));
     dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
     dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
         new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
         new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
+    multiCoreSiteMap.put("hadoop.proxyuser.hive.hosts", new MultipleHostTopologyUpdater("HIVE_SERVER"));
+    multiCoreSiteMap.put("hadoop.proxyuser.HTTP.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
+    multiCoreSiteMap.put("hadoop.proxyuser.hcat.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
+    multiWebhcatSiteMap.put("templeton.hive.properties", new MultipleHostTopologyUpdater("HIVE_SERVER"));
+    multiWebhcatSiteMap.put("templeton.kerberos.principal", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
+    hiveEnvMap.put("hive_hostname", new SingleHostTopologyUpdater("HIVE_SERVER"));
 
 
     // OOZIE_SERVER
     // OOZIE_SERVER
     oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));
     oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));
+    oozieSiteMap.put("oozie.authentication.kerberos.principal", new SingleHostTopologyUpdater("OOZIE_SERVER"));
+    oozieSiteMap.put("oozie.service.HadoopAccessorService.kerberos.principal", new SingleHostTopologyUpdater("OOZIE_SERVER"));
+    oozieEnvMap.put("oozie_hostname", new SingleHostTopologyUpdater("OOZIE_SERVER"));
+    multiCoreSiteMap.put("hadoop.proxyuser.oozie.hosts", new MultipleHostTopologyUpdater("OOZIE_SERVER"));
 
 
     // ZOOKEEPER_SERVER
     // ZOOKEEPER_SERVER
     multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
+    multiCoreSiteMap.put("ha.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
 
 
     // STORM
     // STORM
     stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
     stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS"));
@@ -712,6 +876,12 @@ public class BlueprintConfigurationProcessor {
 
 
     // FALCON
     // FALCON
     falconStartupPropertiesMap.put("*.broker.url", new SingleHostTopologyUpdater("FALCON_SERVER"));
     falconStartupPropertiesMap.put("*.broker.url", new SingleHostTopologyUpdater("FALCON_SERVER"));
+    falconStartupPropertiesMap.put("*.falcon.service.authentication.kerberos.principal", new SingleHostTopologyUpdater("FALCON_SERVER"));
+    falconStartupPropertiesMap.put("*.falcon.http.authentication.kerberos.principal", new SingleHostTopologyUpdater("FALCON_SERVER"));
+
+
+    // NAGIOS
+    nagiosEnvMap.put("nagios_principal_name", new SingleHostTopologyUpdater("NAGIOS_SERVER"));
 
 
 
 
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack

+ 42 - 11
ambari-server/src/main/python/ambari-server.py

@@ -127,8 +127,6 @@ SERVER_API_HOST = '127.0.0.1'
 SERVER_API_PROTOCOL = 'http'
 SERVER_API_PROTOCOL = 'http'
 SERVER_API_PORT = '8080'
 SERVER_API_PORT = '8080'
 SERVER_API_LDAP_URL = '/api/v1/controllers/ldap'
 SERVER_API_LDAP_URL = '/api/v1/controllers/ldap'
-SERVER_API_LOGIN = 'admin'
-SERVER_API_PASS = 'admin'
 
 
 # terminal styles
 # terminal styles
 BOLD_ON = '\033[1m'
 BOLD_ON = '\033[1m'
@@ -1462,6 +1460,22 @@ def get_ambari_version(properties):
   return version
   return version
 
 
 
 
+def get_db_type(properties):
+  db_type = None
+  if properties[JDBC_URL_PROPERTY]:
+    jdbc_url = properties[JDBC_URL_PROPERTY].lower()
+    if "postgres" in jdbc_url:
+      db_type = "postgres"
+    elif "oracle" in jdbc_url:
+      db_type = "oracle"
+    elif "mysql" in jdbc_url:
+      db_type = "mysql"
+    elif "derby" in jdbc_url:
+      db_type = "derby"
+
+  return db_type
+
+
 def check_database_name_property(args, upgrade=False):
 def check_database_name_property(args, upgrade=False):
   """
   """
   :param upgrade: If Ambari is being upgraded.
   :param upgrade: If Ambari is being upgraded.
@@ -1475,18 +1489,29 @@ def check_database_name_property(args, upgrade=False):
   version = get_ambari_version(properties)
   version = get_ambari_version(properties)
   if upgrade and compare_versions(version, "1.7.0") >= 0:
   if upgrade and compare_versions(version, "1.7.0") >= 0:
 
 
-    expected_db_name = properties[JDBC_DATABASE_NAME_PROPERTY]
-    # The existing ambari config file is probably from an earlier version of Ambari, and needs to be transformed.
-    if expected_db_name is None or expected_db_name == "":
-      db_name = properties[JDBC_DATABASE_PROPERTY]
+    # This code exists for historic reasons in which property names changed from Ambari 1.6.1 to 1.7.0
+    persistence_type = properties[PERSISTENCE_TYPE_PROPERTY]
+    if persistence_type == "remote":
+      db_name = properties["server.jdbc.schema"]  # this was a property in Ambari 1.6.1, but not after 1.7.0
+      if db_name:
+        write_property(JDBC_DATABASE_NAME_PROPERTY, db_name)
 
 
+      # If DB type is missing, attempt to reconstruct it from the JDBC URL
+      db_type = properties[JDBC_DATABASE_PROPERTY]
+      if db_type is None or db_type.strip().lower() not in ["postgres", "oracle", "mysql", "derby"]:
+        db_type = get_db_type(properties)
+        if db_type:
+          write_property(JDBC_DATABASE_PROPERTY, db_type)
+
+      properties = get_ambari_properties()
+    elif persistence_type == "local":
+      # Ambari 1.6.1, had "server.jdbc.database" as the DB name, and the
+      # DB type was assumed to be "postgres" if it was embedded ("local")
+      db_name = properties[JDBC_DATABASE_PROPERTY]
       if db_name:
       if db_name:
         write_property(JDBC_DATABASE_NAME_PROPERTY, db_name)
         write_property(JDBC_DATABASE_NAME_PROPERTY, db_name)
-        remove_property(JDBC_DATABASE_PROPERTY)
+        write_property(JDBC_DATABASE_PROPERTY, "postgres")
         properties = get_ambari_properties()
         properties = get_ambari_properties()
-      else:
-        err = "DB Name property not set in config file.\n" + SETUP_OR_UPGRADE_MSG
-        raise FatalException(-1, "Upgrade to version %s cannot transform config file." % str(version))
 
 
   dbname = properties[JDBC_DATABASE_NAME_PROPERTY]
   dbname = properties[JDBC_DATABASE_NAME_PROPERTY]
   if dbname is None or dbname == "":
   if dbname is None or dbname == "":
@@ -3021,8 +3046,14 @@ def sync_ldap():
     err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
     err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
     raise FatalException(1, err)
     raise FatalException(1, err)
 
 
+  admin_login = get_validated_string_input(prompt="Enter login: ", default=None,
+                                           pattern=None, description=None,
+                                           is_pass=False, allowEmpty=False)
+  admin_password = get_validated_string_input(prompt="Enter password: ", default=None,
+                                              pattern=None, description=None,
+                                              is_pass=True, allowEmpty=False)
   url = '{0}://{1}:{2!s}{3}'.format(SERVER_API_PROTOCOL, SERVER_API_HOST, SERVER_API_PORT, SERVER_API_LDAP_URL)
   url = '{0}://{1}:{2!s}{3}'.format(SERVER_API_PROTOCOL, SERVER_API_HOST, SERVER_API_PORT, SERVER_API_LDAP_URL)
-  admin_auth = base64.encodestring('%s:%s' % (SERVER_API_LOGIN, SERVER_API_PASS)).replace('\n', '')
+  admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
   request = urllib2.Request(url)
   request = urllib2.Request(url)
   request.add_header('Authorization', 'Basic %s' % admin_auth)
   request.add_header('Authorization', 'Basic %s' % admin_auth)
   request.add_header('X-Requested-By', 'ambari')
   request.add_header('X-Requested-By', 'ambari')

+ 4 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py

@@ -41,6 +41,8 @@ if rpm_version is not None:
   tez_local_api_jars = '/usr/hdp/current/tez/tez*.jar'
   tez_local_api_jars = '/usr/hdp/current/tez/tez*.jar'
   tez_local_lib_jars = '/usr/hdp/current/tez/lib/*.jar'
   tez_local_lib_jars = '/usr/hdp/current/tez/lib/*.jar'
   tez_tar_file = "/usr/hdp/current/tez/lib/tez*.tar.gz"
   tez_tar_file = "/usr/hdp/current/tez/lib/tez*.tar.gz"
+  pig_tar_file = '/usr/hdp/current/pig/pig.tar.gz'
+  hive_tar_file = '/usr/hdp/current/hive/hive.tar.gz'
 
 
   hcat_lib = '/usr/hdp/current/hive/hive-hcatalog/share/hcatalog'
   hcat_lib = '/usr/hdp/current/hive/hive-hcatalog/share/hcatalog'
   webhcat_bin_dir = '/usr/hdp/current/hive/hive-hcatalog/sbin'
   webhcat_bin_dir = '/usr/hdp/current/hive/hive-hcatalog/sbin'
@@ -54,6 +56,8 @@ else:
   tez_local_api_jars = '/usr/lib/tez/tez*.jar'
   tez_local_api_jars = '/usr/lib/tez/tez*.jar'
   tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
   tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
   tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
   tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
+  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
+  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
 
 
   if str(hdp_stack_version).startswith('2.0'):
   if str(hdp_stack_version).startswith('2.0'):
     hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
     hcat_lib = '/usr/lib/hcatalog/share/hcatalog'

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py

@@ -95,7 +95,7 @@ def webhcat():
                 hadoop_conf_dir=params.hadoop_conf_dir
                 hadoop_conf_dir=params.hadoop_conf_dir
   )
   )
 
 
-  CopyFromLocal('/usr/share/HDP-webhcat/pig.tar.gz',
+  CopyFromLocal(params.pig_tar_file,
                 owner=params.webhcat_user,
                 owner=params.webhcat_user,
                 mode=0755,
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 dest_dir=params.webhcat_apps_dir,
@@ -105,7 +105,7 @@ def webhcat():
                 hadoop_conf_dir=params.hadoop_conf_dir
                 hadoop_conf_dir=params.hadoop_conf_dir
   )
   )
 
 
-  CopyFromLocal('/usr/share/HDP-webhcat/hive.tar.gz',
+  CopyFromLocal(params.hive_tar_file,
                 owner=params.webhcat_user,
                 owner=params.webhcat_user,
                 mode=0755,
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 dest_dir=params.webhcat_apps_dir,

+ 0 - 6
ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml

@@ -37,12 +37,6 @@
             <package>
             <package>
               <name>hive_2_9_9_9_*-webhcat</name>
               <name>hive_2_9_9_9_*-webhcat</name>
             </package>
             </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
           </packages>
           </packages>
         </osSpecific>
         </osSpecific>
         <osSpecific>
         <osSpecific>

+ 892 - 14
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java

@@ -18,8 +18,10 @@
 
 
 package org.apache.ambari.server.controller.internal;
 package org.apache.ambari.server.controller.internal;
 
 
+import org.easymock.EasyMockSupport;
 import org.junit.Test;
 import org.junit.Test;
 
 
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashMap;
@@ -29,8 +31,9 @@ import java.util.Set;
 
 
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNotNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.assertTrue;
-
+import static org.easymock.EasyMock.expect;
 
 
 /**
 /**
  * BlueprintConfigurationProcessor unit tests.
  * BlueprintConfigurationProcessor unit tests.
@@ -897,24 +900,899 @@ public class BlueprintConfigurationProcessorTest {
   }
   }
 
 
   @Test
   @Test
-  public void testFalconConfigPropertyUpdaterAdded() throws Exception {
-    Map<String, Map<String, BlueprintConfigurationProcessor.PropertyUpdater>> singleHostUpdaters =
-      BlueprintConfigurationProcessor.getSingleHostTopologyUpdaters();
+  public void testFalconConfigExport() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> falconStartupProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("falcon-startup.properties", falconStartupProperties);
+
+    // setup properties that include host information
+    falconStartupProperties.put("*.broker.url", expectedHostName + ":" + expectedPortNum);
+    falconStartupProperties.put("*.falcon.service.authentication.kerberos.principal", "falcon/" + expectedHostName + "@EXAMPLE.COM");
+    falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + expectedHostName + "@EXAMPLE.COM");
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("Falcon Broker URL property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
+
+    assertEquals("Falcon Kerberos Principal property not properly exported",
+      "falcon/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
+
+    assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
+      "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testFalconConfigClusterUpdate() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> falconStartupProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("falcon-startup.properties", falconStartupProperties);
+
+    // setup properties that include host information
+    falconStartupProperties.put("*.broker.url", createExportedAddress(expectedPortNum, expectedHostGroupName));
+    falconStartupProperties.put("*.falcon.service.authentication.kerberos.principal", "falcon/" + createExportedHostName(expectedHostGroupName) + "@EXAMPLE.COM");
+    falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + createExportedHostName(expectedHostGroupName) + "@EXAMPLE.COM");
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+
+    // call top-level cluster creation update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups);
+
+    assertEquals("Falcon Broker URL property not properly exported",
+      expectedHostName + ":" + expectedPortNum, falconStartupProperties.get("*.broker.url"));
+
+    assertEquals("Falcon Kerberos Principal property not properly exported",
+      "falcon/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
+
+    assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
+      "HTTP/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testFalconConfigClusterUpdateDefaultConfig() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    expect(mockHostGroupOne.getComponents()).andReturn(Arrays.asList("FALCON_SERVER")).atLeastOnce();
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> falconStartupProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("falcon-startup.properties", falconStartupProperties);
+
+    // setup properties that include host information
+    falconStartupProperties.put("*.broker.url", "localhost:" + expectedPortNum);
+    falconStartupProperties.put("*.falcon.service.authentication.kerberos.principal", "falcon/" + "localhost" + "@EXAMPLE.COM");
+    falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + "localhost" + "@EXAMPLE.COM");
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+
+    // call top-level cluster creation update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups);
+
+    assertEquals("Falcon Broker URL property not properly exported",
+      expectedHostName + ":" + expectedPortNum, falconStartupProperties.get("*.broker.url"));
+
+    assertEquals("Falcon Kerberos Principal property not properly exported",
+      "falcon/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
+
+    assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
+      "HTTP/" + expectedHostName + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testDoUpdateForClusterWithNameNodeHAEnabled() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+
+    // setup hdfs HA config for test
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
+
+
+    // setup properties that include exported host group information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups = new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName,mockHostGroupOne);
+
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups);
+
+    // verify that the expected hostname was substituted for the host group name in the config
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testDoNameNodeHighAvailabilityUpdateWithHAEnabled() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+
+    // setup hdfs config for test
+
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
+
+
+    // setup properties that include host information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method, which will call the HA-specific method if HA is enabled
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testDoNameNodeHighAvailabilityUpdateWithHANotEnabled() throws Exception {
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+
+    // hdfs-site config for this test will not include any HA values
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    assertEquals("Incorrect initial state for hdfs-site config",
+      0, hdfsSiteProperties.size());
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("Incorrect state for hdsf-site config after HA call in non-HA environment, should be zero",
+      0, hdfsSiteProperties.size());
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testDoNameNodeHighAvailabilityUpdateWithHAEnabledMultipleServices() throws Exception {
+    final String expectedNameServiceOne = "mynameserviceOne";
+    final String expectedNameServiceTwo = "mynameserviceTwo";
+    final String expectedHostNameOne = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.apache.ambari.org";
+
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostNameOne, expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+
+    // setup hdfs config for test
+
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameServiceOne + "," + expectedNameServiceTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceOne, expectedNodeOne + ", " + expectedNodeTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo, expectedNodeOne + ", " + expectedNodeTwo);
+
+
+    // setup properties that include host information for nameservice one
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
+
+    // setup properties that include host information for nameservice two
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
+
 
 
-    assertTrue("Falcon startup.properties map was not added to the list of updater maps",
-               singleHostUpdaters.containsKey("falcon-startup.properties"));
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
 
 
-    Map<String, BlueprintConfigurationProcessor.PropertyUpdater> fieldsToUpdaters =
-      singleHostUpdaters.get("falcon-startup.properties");
+    // call top-level export method, which will call the HA-specific method if HA is enabled
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
 
 
-    assertTrue("Expected Falcon config property was not present in updater map",
-               fieldsToUpdaters.containsKey("*.broker.url"));
+    // verify results for name service one
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
 
-    assertTrue("PropertyUpdater was not of the expected type for Falcon config property",
-               fieldsToUpdaters.get("*.broker.url") instanceof BlueprintConfigurationProcessor.SingleHostTopologyUpdater);
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+
+
+    // verify results for name service two
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testIsNameNodeHAEnabled() throws Exception {
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    assertFalse("Incorrect HA detection, hdfs-site not available",
+      configProcessor.isNameNodeHAEnabled());
+
+    Map<String, String> hdfsSiteMap = new HashMap<String, String>();
+    configProperties.put("hdfs-site", hdfsSiteMap);
+
+    assertFalse("Incorrect HA detection, HA flag not enabled",
+      configProcessor.isNameNodeHAEnabled());
+
+    hdfsSiteMap.put("dfs.nameservices", "myTestNameService");
+
+    assertTrue("Incorrect HA detection, HA was enabled",
+      configProcessor.isNameNodeHAEnabled());
+
+  }
+
+  @Test
+  public void testParseNameServices() throws Exception {
+    Map<String, String> hdfsSiteConfigMap =
+      new HashMap<String, String>();
+    hdfsSiteConfigMap.put("dfs.nameservices", "serviceOne");
+
+    // verify that a single service is parsed correctly
+    String[] result = BlueprintConfigurationProcessor.parseNameServices(hdfsSiteConfigMap);
+
+    assertNotNull("Resulting array was null",
+      result);
+    assertEquals("Incorrect array size",
+      1, result.length);
+    assertEquals("Incorrect value for returned name service",
+      "serviceOne", result[0]);
+
+    // verify that multiple services are parsed correctly
+    hdfsSiteConfigMap.put("dfs.nameservices", " serviceTwo, serviceThree, serviceFour");
+
+    String[] resultTwo = BlueprintConfigurationProcessor.parseNameServices(hdfsSiteConfigMap);
+
+    assertNotNull("Resulting array was null",
+      resultTwo);
+    assertEquals("Incorrect array size",
+      3, resultTwo.length);
+    assertEquals("Incorrect value for returned name service",
+      "serviceTwo", resultTwo[0]);
+    assertEquals("Incorrect value for returned name service",
+      "serviceThree", resultTwo[1]);
+    assertEquals("Incorrect value for returned name service",
+      "serviceFour", resultTwo[2]);
+  }
+
+  @Test
+  public void testParseNameNodes() throws Exception {
+    final String expectedServiceName = "serviceOne";
+    Map<String, String> hdfsSiteConfigMap =
+      new HashMap<String, String>();
+    hdfsSiteConfigMap.put("dfs.ha.namenodes." + expectedServiceName, "node1");
+
+    // verify that a single name node is parsed correctly
+    String[] result =
+      BlueprintConfigurationProcessor.parseNameNodes(expectedServiceName, hdfsSiteConfigMap);
+
+    assertNotNull("Resulting array was null",
+      result);
+    assertEquals("Incorrect array size",
+      1, result.length);
+    assertEquals("Incorrect value for returned name nodes",
+      "node1", result[0]);
+
+    // verify that multiple name nodes are parsed correctly
+    hdfsSiteConfigMap.put("dfs.ha.namenodes." + expectedServiceName, " nodeSeven, nodeEight, nodeNine");
+
+    String[] resultTwo =
+      BlueprintConfigurationProcessor.parseNameNodes(expectedServiceName, hdfsSiteConfigMap);
+
+    assertNotNull("Resulting array was null",
+      resultTwo);
+    assertEquals("Incorrect array size",
+      3, resultTwo.length);
+    assertEquals("Incorrect value for returned name node",
+      "nodeSeven", resultTwo[0]);
+    assertEquals("Incorrect value for returned name node",
+      "nodeEight", resultTwo[1]);
+    assertEquals("Incorrect value for returned name node",
+      "nodeNine", resultTwo[2]);
+
+  }
+
+  @Test
+  public void testYarnConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> yarnSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("yarn-site", yarnSiteProperties);
+
+    // setup properties that include host information
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
+    yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.scheduler.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.admin.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.webapp.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.webapp.https.address", expectedHostName + ":" + expectedPortNum);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("Yarn Log Server URL was incorrectly exported",
+      "http://" + "%HOSTGROUP::" + expectedHostGroupName + "%" +":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
+    assertEquals("Yarn ResourceManager hostname was incorrectly exported",
+      createExportedHostName(expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.hostname"));
+    assertEquals("Yarn ResourceManager tracker address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
+    assertEquals("Yarn ResourceManager webapp address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+    assertEquals("Yarn ResourceManager scheduler address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
+    assertEquals("Yarn ResourceManager address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
+    assertEquals("Yarn ResourceManager admin address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
+    assertEquals("Yarn ResourceManager timeline-service address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.address"));
+    assertEquals("Yarn ResourceManager timeline webapp address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+    assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testHDFSConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+
+    Map<String, String> hbaseSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("hbase-site", hbaseSiteProperties);
+
+    // setup properties that include host information
+    hdfsSiteProperties.put("dfs.http.address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.https.address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.secondary.http.address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.secondary.http-address", expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir", expectedHostName + ":" + expectedPortNum);
+
+    coreSiteProperties.put("fs.default.name", expectedHostName + ":" + expectedPortNum);
+    coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedHostName + ":" + expectedPortNum);
+
+    hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedHostName + ":" + expectedPortNum + "/apps/hbase/data");
+
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.http.address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.secondary.http.address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.secondary.http-address"));
+    assertEquals("hdfs config property not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+
+    assertEquals("hdfs config in core-site not exported properly",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.default.name"));
+    assertEquals("hdfs config in core-site not exported properly",
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.defaultFS"));
+
+    assertEquals("hdfs config in hbase-site not exported properly",
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testHiveConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+    expect(mockHostGroupTwo.getName()).andReturn(expectedHostGroupNameTwo).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hiveSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> hiveEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hive-site", hiveSiteProperties);
+    configProperties.put("hive-env", hiveEnvProperties);
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+    configProperties.put("core-site", coreSiteProperties);
+
+
+    // setup properties that include host information
+    hiveSiteProperties.put("hive.metastore.uris", expectedHostName + ":" + expectedPortNum);
+    hiveSiteProperties.put("javax.jdo.option.ConnectionURL", expectedHostName + ":" + expectedPortNum);
+    hiveEnvProperties.put("hive_hostname", expectedHostName);
+
+    webHCatSiteProperties.put("templeton.hive.properties", expectedHostName + "," + expectedHostNameTwo);
+    webHCatSiteProperties.put("templeton.kerberos.principal", expectedHostName);
+
+    coreSiteProperties.put("hadoop.proxyuser.hive.hosts", expectedHostName + "," + expectedHostNameTwo);
+    coreSiteProperties.put("hadoop.proxyuser.HTTP.hosts", expectedHostName + "," + expectedHostNameTwo);
+    coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("hive property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
+    assertEquals("hive property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName), hiveEnvProperties.get("hive_hostname"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
+
+    mockSupport.verifyAll();
+  }
+
+
+  @Test
+  public void testOozieConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+    expect(mockHostGroupTwo.getName()).andReturn(expectedHostGroupNameTwo).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> oozieSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> oozieEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("oozie-site", oozieSiteProperties);
+    configProperties.put("oozie-env", oozieEnvProperties);
+    configProperties.put("hive-env", oozieEnvProperties);
+    configProperties.put("core-site", coreSiteProperties);
+
+    oozieSiteProperties.put("oozie.base.url", expectedHostName);
+    oozieSiteProperties.put("oozie.authentication.kerberos.principal", expectedHostName);
+    oozieSiteProperties.put("oozie.service.HadoopAccessorService.kerberos.principal", expectedHostName);
+
+    oozieEnvProperties.put("oozie_hostname", expectedHostName);
+
+    coreSiteProperties.put("hadoop.proxyuser.oozie.hosts", expectedHostName + "," + expectedHostNameTwo);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("oozie property not exported correctly",
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.base.url"));
+    assertEquals("oozie property not exported correctly",
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
+    assertEquals("oozie property not exported correctly",
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
+    assertEquals("oozie property not exported correctly",
+      createExportedHostName(expectedHostGroupName), oozieEnvProperties.get("oozie_hostname"));
+    assertEquals("oozie property not exported correctly",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testZookeeperConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+    expect(mockHostGroupTwo.getName()).andReturn(expectedHostGroupNameTwo).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> hbaseSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("hbase-site", hbaseSiteProperties);
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+
+    coreSiteProperties.put("ha.zookeeper.quorum", expectedHostName + "," + expectedHostNameTwo);
+    hbaseSiteProperties.put("hbase.zookeeper.quorum", expectedHostName + "," + expectedHostNameTwo);
+    webHCatSiteProperties.put("templeton.zookeeper.hosts", expectedHostName + "," + expectedHostNameTwo);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("ha.zookeeper.quorum"));
+    assertEquals("zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      hbaseSiteProperties.get("hbase.zookeeper.quorum"));
+    assertEquals("zookeeper config not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.zookeeper.hosts"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testNagiosConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> nagiosEnvProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("nagios-env", nagiosEnvProperties);
+
+    nagiosEnvProperties.put("nagios_principal_name", expectedHostName);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne));
+
+    assertEquals("nagios config not properly exported",
+      createExportedHostName(expectedHostGroupName),
+      nagiosEnvProperties.get("nagios_principal_name"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  private static String createExportedAddress(String expectedPortNum, String expectedHostGroupName) {
+    return createExportedHostName(expectedHostGroupName) + ":" + expectedPortNum;
+  }
 
 
-    assertEquals("PropertyUpdater was not associated with the expected component name",
-                 "FALCON_SERVER", ((BlueprintConfigurationProcessor.SingleHostTopologyUpdater)fieldsToUpdaters.get("*.broker.url")).getComponentName());
+  private static String createExportedHostName(String expectedHostGroupName) {
+    return "%HOSTGROUP::" + expectedHostGroupName + "%";
   }
   }
 
 
   private class TestHostGroup implements HostGroup {
   private class TestHostGroup implements HostGroup {

+ 69 - 3
ambari-server/src/test/python/TestAmbariServer.py

@@ -3189,12 +3189,13 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     args = MagicMock()
     args = MagicMock()
     args.dbms = "postgres"
     args.dbms = "postgres"
     is_root_mock.return_value = True
     is_root_mock.return_value = True
-
-    # In Ambari 1.6.1, the DB name was actually stored in JDBC_DATABASE_PROPERTY, and the JDBC_DATABASE_NAME_PROPERTY
-    # property didn't exist. When upgrading to Ambari 1.7.0, the ambari.properties file should be transformed.
     get_ambari_version_mock.return_value = "1.7.0"
     get_ambari_version_mock.return_value = "1.7.0"
 
 
+    # Local Postgres
+    # In Ambari 1.6.1 for an embedded postgres database, the "server.jdbc.database" property stored the DB name,
+    # and the DB type was assumed to be "postgres" if the "server.persistence.type" property was "local"
     properties = ambari_server.Properties()
     properties = ambari_server.Properties()
+    properties.process_pair(ambari_server.PERSISTENCE_TYPE_PROPERTY, "local")
     properties.process_pair(ambari_server.JDBC_DATABASE_PROPERTY, "ambari")
     properties.process_pair(ambari_server.JDBC_DATABASE_PROPERTY, "ambari")
     get_ambari_properties_mock.return_value = properties
     get_ambari_properties_mock.return_value = properties
 
 
@@ -3205,6 +3206,71 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     else:
     else:
       self.assertTrue(write_property_mock.called)
       self.assertTrue(write_property_mock.called)
 
 
+    # External Postgres
+    # In Ambari 1.6.1 for an external postgres database, the "server.jdbc.database" property stored the
+    # DB type ("postgres"), and the "server.jdbc.schema" property stored the DB name.
+    write_property_mock.reset_mock()
+    properties = ambari_server.Properties()
+    properties.process_pair(ambari_server.PERSISTENCE_TYPE_PROPERTY, "remote")
+    properties.process_pair(ambari_server.JDBC_DATABASE_PROPERTY, "postgres")
+    properties.process_pair("server.jdbc.schema", "ambari")
+    properties.process_pair(ambari_server.JDBC_URL_PROPERTY, "jdbc:postgresql://c6410.ambari.apache.org:5432/ambari")
+
+    get_ambari_properties_mock.return_value = properties
+    try:
+      ambari_server.upgrade(args)
+    except FatalException as fe:
+      self.fail("Did not expect failure: " + str(fe))
+    else:
+      self.assertTrue(write_property_mock.called)
+
+    # External Postgres missing DB type, so it should be set based on the JDBC URL.
+    write_property_mock.reset_mock()
+    properties = ambari_server.Properties()
+    properties.process_pair(ambari_server.PERSISTENCE_TYPE_PROPERTY, "remote")
+    properties.process_pair("server.jdbc.schema", "ambari")
+    properties.process_pair(ambari_server.JDBC_URL_PROPERTY, "jdbc:postgresql://c6410.ambari.apache.org:5432/ambari")
+
+    get_ambari_properties_mock.return_value = properties
+    try:
+      ambari_server.upgrade(args)
+    except FatalException as fe:
+      self.fail("Did not expect failure: " + str(fe))
+    else:
+      self.assertTrue(write_property_mock.call_count == 2)
+
+    # External MySQL
+    # In Ambari 1.6.1 for an external MySQL database, the "server.jdbc.database" property stored the DB type ("mysql"),
+    # And the "server.jdbc.schema" property stored the DB name.
+    write_property_mock.reset_mock()
+    properties = ambari_server.Properties()
+    properties.process_pair(ambari_server.PERSISTENCE_TYPE_PROPERTY, "remote")
+    properties.process_pair(ambari_server.JDBC_DATABASE_PROPERTY, "mysql")
+    properties.process_pair("server.jdbc.schema", "ambari")
+    properties.process_pair(ambari_server.JDBC_URL_PROPERTY, "jdbc:mysql://c6409.ambari.apache.org:3306/ambari")
+    get_ambari_properties_mock.return_value = properties
+    try:
+      ambari_server.upgrade(args)
+    except FatalException as fe:
+      self.fail("Did not expect failure: " + str(fe))
+    else:
+      self.assertTrue(write_property_mock.called)
+
+    # External MySQL missing DB type, so it should be set based on the JDBC URL.
+    write_property_mock.reset_mock()
+    properties = ambari_server.Properties()
+    properties.process_pair(ambari_server.PERSISTENCE_TYPE_PROPERTY, "remote")
+    properties.process_pair("server.jdbc.schema", "ambari")
+    properties.process_pair(ambari_server.JDBC_URL_PROPERTY, "jdbc:mysql://c6409.ambari.apache.org:3306/ambari")
+
+    get_ambari_properties_mock.return_value = properties
+    try:
+      ambari_server.upgrade(args)
+    except FatalException as fe:
+      self.fail("Did not expect failure: " + str(fe))
+    else:
+      self.assertTrue(write_property_mock.call_count == 2)
+
 
 
   @patch("__builtin__.open")
   @patch("__builtin__.open")
   @patch("os.path.isfile")
   @patch("os.path.isfile")

+ 2 - 2
ambari-web/app/messages.js

@@ -1421,8 +1421,8 @@ Em.I18n.translations = {
     'on the Ambari Server host to make the JDBC driver available and to enable testing the database connection.',
     'on the Ambari Server host to make the JDBC driver available and to enable testing the database connection.',
 
 
   'services.service.config.configHistory.configGroup': 'Config Group',
   'services.service.config.configHistory.configGroup': 'Config Group',
-  'services.service.config.configHistory.leftArrow.tooltip': 'Show earlier versions',
-  'services.service.config.configHistory.rightArrow.tooltip': 'Show later versions',
+  'services.service.config.configHistory.rightArrow.tooltip': 'Show earlier versions',
+  'services.service.config.configHistory.leftArrow.tooltip': 'Show later versions',
   'services.service.config.configHistory.dismissIcon.tooltip': 'Dismiss',
   'services.service.config.configHistory.dismissIcon.tooltip': 'Dismiss',
   'services.service.config.configHistory.makeCurrent.message': 'Created from service config version {0}',
   'services.service.config.configHistory.makeCurrent.message': 'Created from service config version {0}',
   'services.service.config.configHistory.comparing': 'Comparing',
   'services.service.config.configHistory.comparing': 'Comparing',

+ 6 - 2
ambari-web/app/styles/application.less

@@ -5120,7 +5120,7 @@ ul.inline li {
       cursor: pointer;
       cursor: pointer;
       .icon-chevron-right,
       .icon-chevron-right,
       .icon-chevron-left{
       .icon-chevron-left{
-       color: #c3c3c3;
+        color: #d2d9dd;
       }
       }
       .icon-chevron-left:hover,
       .icon-chevron-left:hover,
       .icon-chevron-right:hover{
       .icon-chevron-right:hover{
@@ -5133,7 +5133,7 @@ ul.inline li {
       cursor: not-allowed;
       cursor: not-allowed;
       .icon-chevron-right,
       .icon-chevron-right,
       .icon-chevron-left{
       .icon-chevron-left{
-        color: #c3c3c3;
+        color: #d2d9dd;
       }
       }
     }
     }
   }
   }
@@ -5220,6 +5220,10 @@ ul.inline li {
       cursor: default;
       cursor: default;
       .content {
       .content {
         padding: 1px 5px 15px 5px;
         padding: 1px 5px 15px 5px;
+        .group {
+          text-align: right;
+          margin-top: -20px;
+        }
         .date{
         .date{
           color: #808080;
           color: #808080;
           font-size: 11px;
           font-size: 11px;

+ 63 - 61
ambari-web/app/templates/common/configs/config_history_flow.hbs

@@ -19,10 +19,10 @@
 
 
 <div id="config_history_flow" {{bindAttr class="view.showCompareVersionBar:two-stories-bar:one-story-bar"}}>
 <div id="config_history_flow" {{bindAttr class="view.showCompareVersionBar:two-stories-bar:one-story-bar"}}>
   <div class="version-slider">
   <div class="version-slider">
-    <div {{bindAttr disabled="view.showLeftArrow"}} {{bindAttr class=":icon-chevron-box :pull-left view.showLeftArrow::disabled"}} {{action shiftBack target="view"}} data-toggle="arrow-tooltip"
-      {{translateAttr data-original-title="services.service.config.configHistory.rightArrow.tooltip"}}><i class="icon-chevron-left icon-3x"></i></div>
-    <div {{bindAttr disabled="view.showRightArrow"}} {{bindAttr class=":icon-chevron-box :pull-left view.showRightArrow::disabled"}} {{action shiftForward target="view"}} data-toggle="arrow-tooltip"
-      {{translateAttr data-original-title="services.service.config.configHistory.leftArrow.tooltip"}}><i class="icon-chevron-right icon-3x"></i></div>
+    <div {{bindAttr class=":icon-chevron-box :pull-left view.showLeftArrow::disabled"}} {{action shiftBack target="view"}} data-toggle="arrow-tooltip"
+      {{bindAttr data-original-title="view.leftArrowTooltip"}}><i class="icon-chevron-left icon-3x"></i></div>
+    <div {{bindAttr class=":icon-chevron-box :pull-left view.showRightArrow::disabled"}} {{action shiftForward target="view"}} data-toggle="arrow-tooltip"
+      {{bindAttr data-original-title="view.rightArrowTooltip"}}><i class="icon-chevron-right icon-3x"></i></div>
     {{#each serviceVersion in view.visibleServiceVersion}}
     {{#each serviceVersion in view.visibleServiceVersion}}
       {{view view.serviceVersionBox serviceVersionBinding=serviceVersion}}
       {{view view.serviceVersionBox serviceVersionBinding=serviceVersion}}
     {{/each}}
     {{/each}}
@@ -54,63 +54,65 @@
       </div>
       </div>
     {{/if}}
     {{/if}}
       <div class="version-info-bar">
       <div class="version-info-bar">
-          <div class="row-fluid">
-            {{#if App.isManager}}
-              <div class="btn-group pull-left">
-                  <button id="toggle-dropdown-button" class="btn dropdown-toggle" data-toggle="dropdown" href="#" {{action hideFullList target="view"}} {{bindAttr disabled="view.versionActionsDisabled"}}>
-                      <i class="icon-random"></i>
-                      <span class="caret"></span>
-                  </button>
-                  <ul class="dropdown-menu">
-                    {{#each serviceVersion in view.dropDownList}}
-                        <li {{bindAttr class=":pointer :dropdown-submenu serviceVersion.isDisplayed:not-allowed"}}>
-                            <div class="row-fluid version-in-dropdown " {{action switchVersion serviceVersion target="view"}}>
-                                <div class="span2">{{serviceVersion.versionText}}</div>
-                                <div class="span6">{{serviceVersion.createdDate}}</div>
-                                <div class="span3">{{serviceVersion.author}}</div>
-                                <div class="pull-right"><i class="icon-caret-right"></i></div>
-                            </div>
-                            <ul class="dropdown-menu version-info-operations">
-                                <div class="content"><span class="label label-info">{{serviceVersion.versionText}}</span> <span class="pull-right"><strong>{{serviceVersion.configGroupName}}</strong></span>
-                                    <div class="date"><strong>{{serviceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{serviceVersion.createdDate}}</strong></div>
-                                    <div class="notes">{{{serviceVersion.fullNotes}}}</div>
-                                </div>
-                                <div class="version-operations-buttons">
-                                    <button {{bindAttr disabled="serviceVersion.disabledActionAttr.view" class=":btn serviceVersion.isDisplayed:not-allowed-cursor" title="serviceVersion.disabledActionMessages.view"}} {{action switchVersion serviceVersion target="view"}}><i class="icon-search"></i>&nbsp;{{t common.view}}</button>
-                                    <button {{bindAttr disabled="serviceVersion.disabledActionAttr.compare" class=":btn serviceVersion.isDisplayed:not-allowed-cursor" title="serviceVersion.disabledActionMessages.compare"}} {{action compare serviceVersion target="view"}}><i class="icon-copy"></i>&nbsp;{{t common.compare}}</button>
-                                    <button {{bindAttr disabled="serviceVersion.disabledActionAttr.revert" class=":btn serviceVersion.isCurrent:not-allowed-cursor" title="serviceVersion.disabledActionMessages.revert"}} {{action revert serviceVersion target="view"}}>{{t dashboard.configHistory.info-bar.revert.button}}</button>
-                                </div>
-                            </ul>
-                        </li>
-                    {{/each}}
-                    {{#unless view.showFullList}}
-                        <li class="align-center pointer" id="show_more">
-                            <a {{action openFullList target="view"}}>
-                              {{t dashboard.configHistory.info-bar.showMore}}&nbsp;{{view.displayName}}
-                                &nbsp;<span class="lowercase ellipsis">{{t dashboard.configHistory.title}}</span>
-                            </a>
-                        </li>
-                    {{/unless}}
-                  </ul>
-              </div>
-            {{/if}}
-              <div class="label-wrapper span8" data-toggle="tooltip" {{bindAttr data-original-title="view.displayedServiceVersion.fullNotes"}}>
-                  <span class="label label-info">{{view.displayedServiceVersion.versionText}}</span>
-                {{#if view.displayedServiceVersion.isCurrent}}
-                    <span class="label label-success">{{t common.current}}</span>
-                {{/if}}
-                  <strong>{{view.displayedServiceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{view.displayedServiceVersion.createdDate}}</strong>
-              </div>
-            {{#if App.isManager}}
-              <div class="pull-right operations-button">
-                  <div {{bindAttr class="view.displayedServiceVersion.isCurrent::hidden"}}>
-                      <button class="btn" {{action doCancel target="controller"}} {{bindAttr disabled="view.isDiscardDisabled"}}>{{t common.discard}}</button>
-                      <button class="btn btn-success" {{action save target="view"}} {{bindAttr disabled="view.isSaveDisabled"}}>{{t common.save}}</button>
-                  </div>
-                  <button class="btn btn-success"  {{action revert view.serviceVersionsReferences.displayed target="view"}} {{bindAttr disabled="view.versionActionsDisabled" class="view.displayedServiceVersion.isCurrent:hidden"}}>{{view.displayedServiceVersion.makeCurrentButtonText}}</button>
-              </div>
-            {{/if}}
-          </div>
+        <div class="row-fluid">
+          {{#if App.isManager}}
+            <div class="btn-group pull-left">
+              <button id="toggle-dropdown-button" class="btn dropdown-toggle" data-toggle="dropdown" href="#" {{action hideFullList target="view"}} {{bindAttr disabled="view.versionActionsDisabled"}}>
+                <i class="icon-random"></i>
+                <span class="caret"></span>
+              </button>
+              <ul class="dropdown-menu">
+                {{#each serviceVersion in view.dropDownList}}
+                  <li {{bindAttr class=":pointer :dropdown-submenu serviceVersion.isDisplayed:not-allowed"}}>
+                    <div class="row-fluid version-in-dropdown " {{action switchVersion serviceVersion target="view"}}>
+                      <div class="span2">{{serviceVersion.versionText}}</div>
+                      <div class="span6">{{serviceVersion.createdDate}}</div>
+                      <div class="span3">{{serviceVersion.author}}</div>
+                      <div class="pull-right"><i class="icon-caret-right"></i></div>
+                    </div>
+                    <ul class="dropdown-menu version-info-operations">
+                      <div class="content">
+                        <span class="label label-info">{{serviceVersion.versionText}}</span>
+                        <div class="group"><strong>{{serviceVersion.configGroupName}}</strong></div>
+                        <div class="date"><strong>{{serviceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{serviceVersion.createdDate}}</strong></div>
+                        <div class="notes">{{{serviceVersion.fullNotes}}}</div>
+                      </div>
+                      <div class="version-operations-buttons">
+                        <button {{bindAttr disabled="serviceVersion.disabledActionAttr.view" class=":btn serviceVersion.isDisplayed:not-allowed-cursor" title="serviceVersion.disabledActionMessages.view"}} {{action switchVersion serviceVersion target="view"}}><i class="icon-search"></i>&nbsp;{{t common.view}}</button>
+                        <button {{bindAttr disabled="serviceVersion.disabledActionAttr.compare" class=":btn serviceVersion.isDisplayed:not-allowed-cursor" title="serviceVersion.disabledActionMessages.compare"}} {{action compare serviceVersion target="view"}}><i class="icon-copy"></i>&nbsp;{{t common.compare}}</button>
+                        <button {{bindAttr disabled="serviceVersion.disabledActionAttr.revert" class=":btn serviceVersion.isCurrent:not-allowed-cursor" title="serviceVersion.disabledActionMessages.revert"}} {{action revert serviceVersion target="view"}}>{{t dashboard.configHistory.info-bar.revert.button}}</button>
+                      </div>
+                    </ul>
+                  </li>
+                {{/each}}
+                {{#unless view.showFullList}}
+                    <li class="align-center pointer" id="show_more">
+                        <a {{action openFullList target="view"}}>
+                          {{t dashboard.configHistory.info-bar.showMore}}&nbsp;{{view.displayName}}
+                            &nbsp;<span class="lowercase ellipsis">{{t dashboard.configHistory.title}}</span>
+                        </a>
+                    </li>
+                {{/unless}}
+              </ul>
+            </div>
+          {{/if}}
+            <div class="label-wrapper span8" data-toggle="tooltip" {{bindAttr data-original-title="view.displayedServiceVersion.fullNotes"}}>
+                <span class="label label-info">{{view.displayedServiceVersion.versionText}}</span>
+              {{#if view.displayedServiceVersion.isCurrent}}
+                  <span class="label label-success">{{t common.current}}</span>
+              {{/if}}
+                <strong>{{view.displayedServiceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{view.displayedServiceVersion.createdDate}}</strong>
+            </div>
+          {{#if App.isManager}}
+            <div class="pull-right operations-button">
+                <div {{bindAttr class="view.displayedServiceVersion.isCurrent::hidden"}}>
+                    <button class="btn" {{action doCancel target="controller"}} {{bindAttr disabled="view.isDiscardDisabled"}}>{{t common.discard}}</button>
+                    <button class="btn btn-success" {{action save target="view"}} {{bindAttr disabled="view.isSaveDisabled"}}>{{t common.save}}</button>
+                </div>
+                <button class="btn btn-success"  {{action revert view.serviceVersionsReferences.displayed target="view"}} {{bindAttr disabled="view.versionActionsDisabled" class="view.displayedServiceVersion.isCurrent:hidden"}}>{{view.displayedServiceVersion.makeCurrentButtonText}}</button>
+            </div>
+          {{/if}}
+        </div>
       </div>
       </div>
   </div>
   </div>
 </div>
 </div>

+ 2 - 1
ambari-web/app/templates/common/configs/service_version_box.hbs

@@ -36,7 +36,8 @@
     </div>
     </div>
 
 
     <div class="version-popover">
     <div class="version-popover">
-      <div class="content"><span class="label label-info">{{serviceVersion.versionText}}</span> <span class="pull-right"><strong>{{serviceVersion.configGroupName}}</strong></span>
+      <div class="content">
+        <span class="label label-info">{{serviceVersion.versionText}}</span> <span class="pull-right"><strong>{{serviceVersion.configGroupName}}</strong></span>
         <div class="date"><strong>{{serviceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{serviceVersion.createdDate}}</strong></div>
         <div class="date"><strong>{{serviceVersion.author}}</strong>&nbsp;{{t dashboard.configHistory.info-bar.authoredOn}}&nbsp;<strong>{{serviceVersion.createdDate}}</strong></div>
         <div class="notes">{{{serviceVersion.fullNotes}}}</div>
         <div class="notes">{{{serviceVersion.fullNotes}}}</div>
       </div>
       </div>

+ 13 - 1
ambari-web/app/views/common/configs/config_history_flow.js

@@ -27,6 +27,12 @@ App.ConfigHistoryFlowView = Em.View.extend({
   startIndex: 0,
   startIndex: 0,
   showLeftArrow: false,
   showLeftArrow: false,
   showRightArrow: false,
   showRightArrow: false,
+  leftArrowTooltip: function () {
+    return this.get('showLeftArrow') ? Em.I18n.t('services.service.config.configHistory.leftArrow.tooltip') : null;
+  }.property('showLeftArrow'),
+  rightArrowTooltip: function () {
+    return this.get('showRightArrow') ? Em.I18n.t('services.service.config.configHistory.rightArrow.tooltip') : null;
+  }.property('showRightArrow'),
   VERSIONS_IN_FLOW: 6,
   VERSIONS_IN_FLOW: 6,
   VERSIONS_IN_DROPDOWN: 6,
   VERSIONS_IN_DROPDOWN: 6,
   /**
   /**
@@ -126,7 +132,7 @@ App.ConfigHistoryFlowView = Em.View.extend({
    * by default 6 is number of items in short list
    * by default 6 is number of items in short list
    */
    */
   dropDownList: function () {
   dropDownList: function () {
-    var serviceVersions = this.get('serviceVersions').slice(0).reverse();
+    var serviceVersions = this.get('serviceVersions').slice(0);
     if (this.get('showFullList')) {
     if (this.get('showFullList')) {
       return serviceVersions;
       return serviceVersions;
     }
     }
@@ -439,6 +445,12 @@ App.ConfigHistoryFlowView = Em.View.extend({
       secondary: Em.I18n.t('common.cancel'),
       secondary: Em.I18n.t('common.cancel'),
       onSave: function () {
       onSave: function () {
         self.get('controller').set('serviceConfigVersionNote', this.get('serviceConfigNote'));
         self.get('controller').set('serviceConfigVersionNote', this.get('serviceConfigNote'));
+        var newVersionToBeCreated = App.ServiceConfigVersion.find().filterProperty('serviceName', self.get('serviceName')).get('length') + 1;
+        self.get('controller').set('preSelectedConfigVersion', Em.Object.create({
+          version: newVersionToBeCreated,
+          serviceName: self.get('displayedServiceVersion.serviceName'),
+          groupName: self.get('displayedServiceVersion.groupName')
+        }));
         self.get('controller').restartServicePopup();
         self.get('controller').restartServicePopup();
         this.hide();
         this.hide();
       },
       },

+ 7 - 3
ambari-web/app/views/main/host.js

@@ -140,9 +140,13 @@ App.MainHostView = App.TableView.extend(App.TableServerProvider, {
   rowsPerPageSelectView: Em.Select.extend({
   rowsPerPageSelectView: Em.Select.extend({
     content: ['10', '25', '50', '100'],
     content: ['10', '25', '50', '100'],
     attributeBindings: ['disabled'],
     attributeBindings: ['disabled'],
-    disabled: function () {
-      return !this.get('parentView.filteringComplete');
-    }.property('parentView.filteringComplete'),
+    disabled: true,
+
+    disableView: function () {
+      Em.run.next(this, function(){
+        this.set('disabled', !this.get('parentView.filteringComplete'));
+      });
+    }.observes('parentView.filteringComplete'),
 
 
     change: function () {
     change: function () {
       this.get('parentView').saveDisplayLength();
       this.get('parentView').saveDisplayLength();

+ 17 - 3
contrib/views/slider/docs/index.md

@@ -1,3 +1,17 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
 # Slider Apps View
 # Slider Apps View
 
 
 ## Security Guide
 ## Security Guide
@@ -7,7 +21,7 @@
 After deploying a HDP cluster through Ambari, it can be secured by using the *Enable Security* button in *Admin > Security* page.
 After deploying a HDP cluster through Ambari, it can be secured by using the *Enable Security* button in *Admin > Security* page.
 
 
 #### Step-2: Create *Kerberos* principal for view
 #### Step-2: Create *Kerberos* principal for view
-We need to provide a *Kerberos* identity for the process in which the view is run. We shall identify the user as `view-principal`. Since views are generally hosted by Ambari server, typically this can be named as *ambari*.
+We need to provide a *Kerberos* identity for the process in which the view is run. We shall identify the user as `view-principal`. **In this document `view-principal` can be changed to any suitable name.** Since views are generally hosted by Ambari server, typically this can be named as *ambari*.
 
 
 On the machine where *KDC Server* is hosted, create user principal by running below command
 On the machine where *KDC Server* is hosted, create user principal by running below command
 
 
@@ -34,8 +48,8 @@ chmod 440 /etc/security/keytabs/view-principal.headless.keytab
 #### Step-3: Configure *proxyuser* for created principal
 #### Step-3: Configure *proxyuser* for created principal
 Add the following configurations in *Custom core-site* section of *HDFS* service.
 Add the following configurations in *Custom core-site* section of *HDFS* service.
 
 
-* hadoop.proxyuser.ambari.groups = *
-* hadoop.proxyuser.ambari.hosts = `view-server-host`
+* hadoop.proxyuser.`view-principal`.groups = *
+* hadoop.proxyuser.`view-principal`.hosts = `view-server-host`
 
 
 This will in-turn show up in *core-site.xml* as
 This will in-turn show up in *core-site.xml* as
 
 

+ 1 - 0
contrib/views/slider/src/main/java/org/apache/ambari/view/slider/SliderAppsViewController.java

@@ -33,6 +33,7 @@ public interface SliderAppsViewController {
 
 
   public static final String PROPERTY_HDFS_ADDRESS = "hdfs.address";
   public static final String PROPERTY_HDFS_ADDRESS = "hdfs.address";
   public static final String PROPERTY_YARN_RM_ADDRESS = "yarn.resourcemanager.address";
   public static final String PROPERTY_YARN_RM_ADDRESS = "yarn.resourcemanager.address";
+  public static final String PROPERTY_YARN_RM_WEBAPP_ADDRESS = "yarn.resourcemanager.webapp.address";
   public static final String PROPERTY_YARN_RM_SCHEDULER_ADDRESS = "yarn.resourcemanager.scheduler.address";
   public static final String PROPERTY_YARN_RM_SCHEDULER_ADDRESS = "yarn.resourcemanager.scheduler.address";
   public static final String PROPERTY_ZK_QUOROM = "zookeeper.quorum";
   public static final String PROPERTY_ZK_QUOROM = "zookeeper.quorum";
   public static final String PROPERTY_GANGLIA_SERVER_HOSTNAME = "ganglia.server.hostname";
   public static final String PROPERTY_GANGLIA_SERVER_HOSTNAME = "ganglia.server.hostname";

+ 1 - 1
contrib/views/slider/src/main/java/org/apache/ambari/view/slider/SliderAppsViewControllerImpl.java

@@ -459,7 +459,7 @@ public class SliderAppsViewControllerImpl implements SliderAppsViewController {
     String rmAddress = viewContext.getProperties().get(PROPERTY_YARN_RM_ADDRESS);
     String rmAddress = viewContext.getProperties().get(PROPERTY_YARN_RM_ADDRESS);
     String rmSchedulerAddress = viewContext.getProperties().get(PROPERTY_YARN_RM_SCHEDULER_ADDRESS);
     String rmSchedulerAddress = viewContext.getProperties().get(PROPERTY_YARN_RM_SCHEDULER_ADDRESS);
     String zkQuorum = viewContext.getProperties().get(PROPERTY_ZK_QUOROM);
     String zkQuorum = viewContext.getProperties().get(PROPERTY_ZK_QUOROM);
-    boolean securedCluster = Boolean.getBoolean(viewContext.getProperties().get(PROPERTY_SLIDER_SECURITY_ENABLED));
+    boolean securedCluster = Boolean.valueOf(viewContext.getProperties().get(PROPERTY_SLIDER_SECURITY_ENABLED));
 
 
     HdfsConfiguration hdfsConfig = new HdfsConfiguration();
     HdfsConfiguration hdfsConfig = new HdfsConfiguration();
     YarnConfiguration yarnConfig = new YarnConfiguration(hdfsConfig);
     YarnConfiguration yarnConfig = new YarnConfiguration(hdfsConfig);

+ 1 - 1
contrib/views/slider/src/main/java/org/apache/ambari/view/slider/rest/client/SliderAppMasterClient.java

@@ -99,7 +99,7 @@ public class SliderAppMasterClient extends BaseHttpClient {
             quickLinks.put("JMX", entry.getValue().getAsString());
             quickLinks.put("JMX", entry.getValue().getAsString());
           } else if ("org.apache.slider.monitor".equals(entry.getKey())) {
           } else if ("org.apache.slider.monitor".equals(entry.getKey())) {
             quickLinks.put("UI", entry.getValue().getAsString());
             quickLinks.put("UI", entry.getValue().getAsString());
-          } else if ("app.metrics".equals(entry.getKey())) {
+          } else if ("org.apache.slider.metrics".equals(entry.getKey())) {
             quickLinks.put("Metrics", entry.getValue().getAsString());
             quickLinks.put("Metrics", entry.getValue().getAsString());
           } else {
           } else {
             quickLinks.put(entry.getKey(), entry.getValue().getAsString());
             quickLinks.put(entry.getKey(), entry.getValue().getAsString());

+ 1 - 1
contrib/views/slider/src/main/resources/ui/app/assets/data/apps/apps.json

@@ -12,7 +12,7 @@
       "metrics" : null,
       "metrics" : null,
       "name" : "h4",
       "name" : "h4",
       "startTime" : 1409348496653,
       "startTime" : 1409348496653,
-      "state" : "RUNNING",
+      "state" : "FROZEN",
       "type" : "hbase",
       "type" : "hbase",
       "user" : "yarn",
       "user" : "yarn",
       "version" : "1.0.0",
       "version" : "1.0.0",

+ 1 - 0
contrib/views/slider/src/main/resources/ui/app/assets/data/resource/slider-properties.json

@@ -16,6 +16,7 @@
       "hdfs.address" : "hdfs://slider-1.c.pramod-thangali.internal:8020",
       "hdfs.address" : "hdfs://slider-1.c.pramod-thangali.internal:8020",
       "yarn.resourcemanager.address" : "slider-2.c.pramod-thangali.internal:8050",
       "yarn.resourcemanager.address" : "slider-2.c.pramod-thangali.internal:8050",
       "yarn.resourcemanager.scheduler.address" : "slider-2.c.pramod-thangali.internal:8030",
       "yarn.resourcemanager.scheduler.address" : "slider-2.c.pramod-thangali.internal:8030",
+      "yarn.resourcemanager.webapp.address" : "slider-2.c.pramod-thangali.internal:8088",
       "zookeeper.quorum" : "slider-1.c.pramod-thangali.internal:2181,slider-2.c.pramod-thangali.internal:2181,slider-3.c.pramod-thangali.internal:2181",
       "zookeeper.quorum" : "slider-1.c.pramod-thangali.internal:2181,slider-2.c.pramod-thangali.internal:2181,slider-3.c.pramod-thangali.internal:2181",
       "ganglia.server.hostname" : "bvc",
       "ganglia.server.hostname" : "bvc",
       "ganglia.custom.clusters" : "'HBaseCluster1','7000','AccumuloCluster1','7001','HBaseCluster2','7002'",
       "ganglia.custom.clusters" : "'HBaseCluster1','7000','AccumuloCluster1','7001','HBaseCluster2','7002'",

+ 90 - 49
contrib/views/slider/src/main/resources/ui/app/controllers/slider_app_controller.js

@@ -48,6 +48,7 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
    */
    */
   availableActions: function() {
   availableActions: function() {
     var actions = Em.A([]),
     var actions = Em.A([]),
+      advanced = Em.A([]),
       status = this.get('model.status');
       status = this.get('model.status');
     if ('RUNNING' === status) {
     if ('RUNNING' === status) {
       actions.pushObject({
       actions.pushObject({
@@ -64,28 +65,77 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
       });
       });
     }
     }
     if ('FROZEN' === status) {
     if ('FROZEN' === status) {
-      actions.pushObjects([
-        {
-          title: 'Start',
-          action: 'thaw',
-          confirm: false
-        },
-        {
-          title: 'Destroy',
-          action: 'destroy',
-          confirm: true
-        }
-      ]);
+      actions.pushObject({
+        title: 'Start',
+        action: 'thaw',
+        confirm: false
+      });
+      advanced.pushObject({
+        title: 'Destroy',
+        action: 'destroy',
+        customConfirm: 'confirmDestroy'
+      });
+    }
+    if (advanced.length) {
+      actions.pushObject({
+        title: 'Advanced',
+        submenu: advanced
+      });
     }
     }
     return actions;
     return actions;
   }.property('model.status'),
   }.property('model.status'),
 
 
+  /**
+   * Checkbox in the destroy-modal
+   * If true - enable "Destroy"-button
+   * @type {bool}
+   */
+  confirmChecked: false,
+
+  /**
+   * Inverted <code>confirmChecked</code>-value
+   * Used in <code>App.DestroyAppPopupFooterView</code> to enable "Destroy"-button
+   * @type {bool}
+   */
+  destroyButtonEnabled: Ember.computed.not('confirmChecked'),
+
   /**
   /**
    * Method's name that should be called for model
    * Method's name that should be called for model
    * @type {string}
    * @type {string}
    */
    */
   currentAction: null,
   currentAction: null,
 
 
+  /**
+   * Grouped components by name
+   * @type {{name: string, count: number}[]}
+   */
+  groupedComponents: [],
+
+  /**
+   * Does new instance counts are invalid
+   * @type {bool}
+   */
+  groupedComponentsHaveErrors: false,
+
+  /**
+   * Custom popup for "Destroy"-action
+   * @method destroyConfirm
+   */
+  confirmDestroy: function() {
+    var modalComponent = this.container.lookup('component-lookup:main').
+      lookupFactory('bs-modal', this.get('container')).create();
+    modalComponent.setProperties({
+      name: 'confirm-modal',
+      title: Ember.I18n.t('sliderApp.destroy.confirm.title'),
+      manual: true,
+      targetObject: this,
+      body: App.DestroyAppPopupView,
+      controller: this,
+      footerViews: [App.DestroyAppPopupFooterView]
+    });
+    Bootstrap.ModalManager.register('confirm-modal', modalComponent);
+  },
+
   /**
   /**
    * Try call controller's method with name stored in <code>currentAction</code>
    * Try call controller's method with name stored in <code>currentAction</code>
    * @method tryDoAction
    * @method tryDoAction
@@ -148,21 +198,6 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
     });
     });
   },
   },
 
 
-  /**
-   * Buttons for Flex modal popup
-   * @type {Em.Object[]}
-   */
-  flexModalButtons: [
-    Ember.Object.create({title: Em.I18n.t('common.cancel'), clicked:"closeFlex", dismiss: 'modal'}),
-    Ember.Object.create({title: Em.I18n.t('common.send'), clicked:"submitFlex", type:'success'})
-  ],
-
-  /**
-   * Grouped components by name
-   * @type {{name: string, count: number}[]}
-   */
-  groupedComponents: [],
-
   /**
   /**
    * Group components by <code>componentName</code> and save them to <code>groupedComponents</code>
    * Group components by <code>componentName</code> and save them to <code>groupedComponents</code>
    * @method groupComponents
    * @method groupComponents
@@ -185,12 +220,6 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
     this.set('groupedComponents', groupedComponents);
     this.set('groupedComponents', groupedComponents);
   },
   },
 
 
-  /**
-   * Does new instance counts are invalid
-   * @type {bool}
-   */
-  groupedComponentsHaveErrors: false,
-
   /**
   /**
    * Validate new instance counts for components (should be integer and >= 0)
    * Validate new instance counts for components (should be integer and >= 0)
    * @method validateGroupedComponents
    * @method validateGroupedComponents
@@ -222,7 +251,10 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
       'flex-popup',
       'flex-popup',
       'Flex',
       'Flex',
       'slider_app/flex_popup',
       'slider_app/flex_popup',
-      this.get('flexModalButtons'),
+      Em.A([
+        Ember.Object.create({title: Em.I18n.t('common.cancel'), clicked:"closeFlex", dismiss: 'modal'}),
+        Ember.Object.create({title: Em.I18n.t('common.send'), clicked:"submitFlex", type:'success'})
+      ]),
       this
       this
     );
     );
   },
   },
@@ -271,7 +303,7 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
   },
   },
 
 
   /**
   /**
-   * Complate-callback for "destroy app"-request
+   * Complete-callback for "destroy app"-request
    * @method destroyCompleteCallback
    * @method destroyCompleteCallback
    */
    */
   destroyCompleteCallback: function() {
   destroyCompleteCallback: function() {
@@ -322,6 +354,7 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
      */
      */
     modalConfirmed: function() {
     modalConfirmed: function() {
       this.tryDoAction();
       this.tryDoAction();
+      this.set('confirmChecked', false);
       return Bootstrap.ModalManager.close('confirm-modal');
       return Bootstrap.ModalManager.close('confirm-modal');
     },
     },
 
 
@@ -331,6 +364,7 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
      * @method modalCanceled
      * @method modalCanceled
      */
      */
     modalCanceled: function() {
     modalCanceled: function() {
+      this.set('confirmChecked', false);
       return Bootstrap.ModalManager.close('confirm-modal');
       return Bootstrap.ModalManager.close('confirm-modal');
     },
     },
 
 
@@ -340,24 +374,31 @@ App.SliderAppController = Ember.ObjectController.extend(App.AjaxErrorHandler, {
      * @method openModal
      * @method openModal
      */
      */
     openModal: function(option) {
     openModal: function(option) {
+      if (!option.action) return false;
       this.set('currentAction', option.action);
       this.set('currentAction', option.action);
-      if (option.confirm) {
-        Bootstrap.ModalManager.open(
-          "confirm-modal",
-          Ember.I18n.t('common.confirmation'),
-          Ember.View.extend({
-            template: Ember.Handlebars.compile('{{t question.sure}}')
-          }),
-          [
-            Ember.Object.create({title: Em.I18n.t('common.cancel'), clicked:"modalCanceled", dismiss: 'modal'}),
-            Ember.Object.create({title: Em.I18n.t('ok'), clicked:"modalConfirmed", type:'success'})
-          ],
-          this
-        );
+      if (!Em.isNone(option.customConfirm) && Ember.typeOf(this.get(option.customConfirm)) === 'function') {
+        this[option.customConfirm]();
       }
       }
       else {
       else {
-        this.tryDoAction();
+        if (option.confirm) {
+          Bootstrap.ModalManager.open(
+            "confirm-modal",
+            Ember.I18n.t('common.confirmation'),
+            Ember.View.extend({
+              template: Ember.Handlebars.compile('{{t question.sure}}')
+            }),
+            [
+              Ember.Object.create({title: Em.I18n.t('common.cancel'), clicked:"modalCanceled", dismiss: 'modal'}),
+              Ember.Object.create({title: Em.I18n.t('ok'), clicked:"modalConfirmed", type:'success'})
+            ],
+            this
+          );
+        }
+        else {
+          this.tryDoAction();
+        }
       }
       }
+      return true;
     }
     }
   }
   }
 
 

+ 7 - 2
contrib/views/slider/src/main/resources/ui/app/mappers/slider_apps_mapper.js

@@ -92,6 +92,7 @@ App.SliderAppsMapper = App.Mapper.createWithMixins(App.RunPeriodically, {
             id: appId + component.componentName + i,
             id: appId + component.componentName + i,
             status: activeContainers[i] ? "Running" : "Stopped",
             status: activeContainers[i] ? "Running" : "Stopped",
             host: activeContainers[i] ? component.activeContainers[activeContainers[i]].host : "",
             host: activeContainers[i] ? component.activeContainers[activeContainers[i]].host : "",
+            containerId: activeContainers[i] ? component.activeContainers[activeContainers[i]].name : "",
             componentName: component.componentName,
             componentName: component.componentName,
             appId: appId
             appId: appId
           })
           })
@@ -135,8 +136,12 @@ App.SliderAppsMapper = App.Mapper.createWithMixins(App.RunPeriodically, {
       yarnAppId += appId.substring(index + 1);
       yarnAppId += appId.substring(index + 1);
     }
     }
     var yarnUI = "http://"+window.location.hostname+":8088";
     var yarnUI = "http://"+window.location.hostname+":8088";
-    if (App.viewUrls) {
-      yarnUI = App.viewUrls['yarn.resourcemanager.webapp.address'];
+    var viewConfigs = App.SliderApp.store.all('sliderConfig');
+    if (!Em.isNone(viewConfigs)) {
+      var viewConfig = viewConfigs.findBy('viewConfigName', 'yarn.resourcemanager.webapp.address');
+      if (!Em.isNone(viewConfig)) {
+        yarnUI = viewConfig.get('value');
+      }
     }
     }
     quickLinks.push(
     quickLinks.push(
       Ember.Object.create({
       Ember.Object.create({

+ 15 - 1
contrib/views/slider/src/main/resources/ui/app/models/slider_app_component.js

@@ -33,6 +33,11 @@ App.SliderAppComponent = DS.Model.extend({
    */
    */
   componentName: DS.attr('string'),
   componentName: DS.attr('string'),
 
 
+  /**
+   * @type {string}
+   */
+  containerId: DS.attr('string'),
+
   /**
   /**
    * @type {App.SliderApp}
    * @type {App.SliderApp}
    */
    */
@@ -44,7 +49,16 @@ App.SliderAppComponent = DS.Model.extend({
    */
    */
   isRunning: function() {
   isRunning: function() {
     return this.get('status') === 'Running';
     return this.get('status') === 'Running';
-  }.property('status')
+  }.property('status'),
+
+  url: function() {
+    var host = this.get('host');
+    var containerId = this.get('containerId');
+    if (host != null && containerId != null) {
+      return "http://" + this.get('host') + ":8042/node/container/" + this.get('containerId');
+    }
+    return null;
+  }.property('host', 'containerId')
 
 
 });
 });
 
 

+ 11 - 10
contrib/views/slider/src/main/resources/ui/app/routes/main.js

@@ -54,16 +54,17 @@ App.SliderAppsRoute = Ember.Route.extend({
     App.SliderApp.store.pushMany('sliderConfig', Em.A([
     App.SliderApp.store.pushMany('sliderConfig', Em.A([
       Em.Object.create({id: 1, required: true, viewConfigName: 'hdfs.address', displayName: 'hdfsAddress', linkedService: 'HDFS'}),
       Em.Object.create({id: 1, required: true, viewConfigName: 'hdfs.address', displayName: 'hdfsAddress', linkedService: 'HDFS'}),
       Em.Object.create({id: 2, required: true, viewConfigName: 'yarn.resourcemanager.address', displayName: 'yarnResourceManager', linkedService: 'YARN'}),
       Em.Object.create({id: 2, required: true, viewConfigName: 'yarn.resourcemanager.address', displayName: 'yarnResourceManager', linkedService: 'YARN'}),
-      Em.Object.create({id: 3, required: true, viewConfigName: 'yarn.resourcemanager.scheduler.address',  displayName: 'yarnResourceManagerScheduler'}),
-      Em.Object.create({id: 4, required: true, viewConfigName: 'zookeeper.quorum', displayName: 'zookeeperQuorum', linkedService: 'ZOOKEEPER'}),
-      Em.Object.create({id: 5, required: false, viewConfigName: 'ganglia.server.hostname', displayName: 'gangliaServer'}),
-      Em.Object.create({id: 6, required: false, viewConfigName: 'ganglia.custom.clusters', displayName: 'gangliaClusters'}),
-      Em.Object.create({id: 7, required: false, viewConfigName: 'slider.user', displayName: 'sliderUser'}),
-      Em.Object.create({id: 8, required: false, viewConfigName: 'slider.security.enabled', displayName: 'sliderSecurityEnabled'}),
-      Em.Object.create({id: 9, required: false, requireDependsOn: 8, viewConfigName: 'yarn.resourcemanager.principal', displayName: 'yarnResourceManagerPrincipal'}),
-      Em.Object.create({id: 10, required: false, requireDependsOn: 8, viewConfigName: 'dfs.namenode.kerberos.principal', displayName: 'dfsNamenodeKerberosPrincipal'}),
-      Em.Object.create({id: 11, required: false, requireDependsOn: 8, viewConfigName: 'view.kerberos.principal', displayName: 'viewKerberosPrincipal'}),
-      Em.Object.create({id: 12, required: false, requireDependsOn: 8, viewConfigName: 'view.kerberos.principal.keytab', displayName: 'ViewKerberosPrincipalKeytab'})
+      Em.Object.create({id: 3, required: true, viewConfigName: 'yarn.resourcemanager.webapp.address', displayName: 'yarnResourceManagerWebapp', linkedService: 'YARN'}),
+      Em.Object.create({id: 4, required: true, viewConfigName: 'yarn.resourcemanager.scheduler.address',  displayName: 'yarnResourceManagerScheduler'}),
+      Em.Object.create({id: 5, required: true, viewConfigName: 'zookeeper.quorum', displayName: 'zookeeperQuorum', linkedService: 'ZOOKEEPER'}),
+      Em.Object.create({id: 6, required: false, viewConfigName: 'ganglia.server.hostname', displayName: 'gangliaServer'}),
+      Em.Object.create({id: 7, required: false, viewConfigName: 'ganglia.custom.clusters', displayName: 'gangliaClusters'}),
+      Em.Object.create({id: 8, required: false, viewConfigName: 'slider.user', displayName: 'sliderUser'}),
+      Em.Object.create({id: 9, required: true, viewConfigName: 'slider.security.enabled', displayName: 'sliderSecurityEnabled'}),
+      Em.Object.create({id: 10, required: false, requireDependsOn: 9, viewConfigName: 'yarn.resourcemanager.principal', displayName: 'yarnResourceManagerPrincipal'}),
+      Em.Object.create({id: 11, required: false, requireDependsOn: 9, viewConfigName: 'dfs.namenode.kerberos.principal', displayName: 'dfsNamenodeKerberosPrincipal'}),
+      Em.Object.create({id: 12, required: false, requireDependsOn: 9, viewConfigName: 'view.kerberos.principal', displayName: 'viewKerberosPrincipal'}),
+      Em.Object.create({id: 13, required: false, requireDependsOn: 9, viewConfigName: 'view.kerberos.principal.keytab', displayName: 'ViewKerberosPrincipalKeytab'})
     ]));
     ]));
   },
   },
 
 

+ 32 - 0
contrib/views/slider/src/main/resources/ui/app/styles/application.less

@@ -923,3 +923,35 @@ select {
   word-wrap: break-word;
   word-wrap: break-word;
   overflow: auto;
   overflow: auto;
 }
 }
+
+.dropdown-submenu {
+  position:relative;
+}
+.dropdown-submenu>.dropdown-menu {
+  top:0;
+  left:-100%;
+  margin-top:-6px;
+  margin-left:-1px;
+  -webkit-border-radius:6px 0 6px 6px;
+  -moz-border-radius:6px 0 6px 6px;
+  border-radius:6px 0 6px 6px;
+}
+.dropdown-submenu:hover>.dropdown-menu {
+  display:block;
+}
+.dropdown-submenu>a:before {
+  display:block;
+  content:" ";
+  float:left;
+  width:0;
+  height:0;
+  border-color:transparent;
+  border-style:solid;
+  border-width:5px 5px 5px 0px;
+  border-right-color:#cccccc;
+  margin-top:5px;
+  margin-left:-10px;
+}
+.dropdown-submenu:hover>a:after {
+  border-left-color:#ffffff;
+}

+ 8 - 1
contrib/views/slider/src/main/resources/ui/app/templates/slider_app.hbs

@@ -32,8 +32,15 @@
       </a>
       </a>
       <ul class="dropdown-menu">
       <ul class="dropdown-menu">
         {{#each option in controller.availableActions}}
         {{#each option in controller.availableActions}}
-          <li>
+          <li {{bind-attr class="option.submenu.length:dropdown-submenu"}}>
             <a {{action 'openModal' option target='controller'}}>{{humanize option.title}}</a>
             <a {{action 'openModal' option target='controller'}}>{{humanize option.title}}</a>
+            <ul class="dropdown-menu">
+              {{#each subitem in option.submenu}}
+                <li>
+                  <a {{action 'openModal' subitem target='controller'}}>{{humanize subitem.title}}</a>
+                </li>
+              {{/each}}
+            </ul>
           </li>
           </li>
         {{/each}}
         {{/each}}
       </ul>
       </ul>

+ 20 - 0
contrib/views/slider/src/main/resources/ui/app/templates/slider_app/destroy/destroy_popup.hbs

@@ -0,0 +1,20 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+<p class="alert alert-danger"> <span class="icon-warning-sign"></span> {{t sliderApp.destroy.confirm.body}}</p>
+{{input type="checkbox" checkedBinding="controller.confirmChecked"}} {{{view.confirmMessage}}}

+ 28 - 0
contrib/views/slider/src/main/resources/ui/app/templates/slider_app/destroy/destroy_popup_footer.hbs

@@ -0,0 +1,28 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{bs-button
+  content=view.cancelButton
+  targetObjectBinding="view.targetObject"
+}}
+
+{{bs-button
+  content=view.destroyButton
+  disabledBinding="controller.destroyButtonEnabled"
+  targetObjectBinding="view.targetObject"
+}}

+ 7 - 1
contrib/views/slider/src/main/resources/ui/app/templates/slider_app/summary.hbs

@@ -59,7 +59,13 @@
           {{#each controller.model.components}}
           {{#each controller.model.components}}
           <tr>
           <tr>
             <td><span {{bind-attr class="isRunning:icon-ok-sign:icon-warning-sign :status"}}></span> {{componentName}}</td>
             <td><span {{bind-attr class="isRunning:icon-ok-sign:icon-warning-sign :status"}}></span> {{componentName}}</td>
-            <td>{{host}}</td>
+            <td>
+              {{#if url}}
+                <a {{bind-attr href="url"}} target="_blank">{{host}}</a>
+              {{else}}
+                {{host}}
+              {{/if}}
+            </td>
           </tr>
           </tr>
           {{/each}}
           {{/each}}
         </tbody>
         </tbody>

+ 5 - 0
contrib/views/slider/src/main/resources/ui/app/translations.js

@@ -31,6 +31,7 @@ Em.I18n.translations = {
     'name': "Name",
     'name': "Name",
     'back': "Back",
     'back': "Back",
     'delete': 'Delete',
     'delete': 'Delete',
+    'destroy': 'Destroy',
     'value': "Value",
     'value': "Value",
     'next': "Next",
     'next': "Next",
     'quickLinks': "Quick Links",
     'quickLinks': "Quick Links",
@@ -97,6 +98,10 @@ Em.I18n.translations = {
   'sliderApp.alerts.brLastCheck': "\nLast Checked {0}",
   'sliderApp.alerts.brLastCheck': "\nLast Checked {0}",
   'sliderApp.alerts.occurredOn': 'Occurred on {0}, {1}',
   'sliderApp.alerts.occurredOn': 'Occurred on {0}, {1}',
 
 
+  'sliderApp.destroy.confirm.title': 'Destroy Slider App',
+  'sliderApp.destroy.confirm.body': 'Destroying a Slider App could result in data loss if not property performed. Make sure you have backed up data handled by the application.',
+  'sliderApp.destroy.confirm.body2': 'Are you sure you want to destroy Slider App <em>{0}</em>?',
+
   'wizard.name': 'Create App',
   'wizard.name': 'Create App',
   'wizard.step1.name': 'Select Type',
   'wizard.step1.name': 'Select Type',
   'wizard.step1.header': 'Available Applications',
   'wizard.step1.header': 'Available Applications',

+ 41 - 0
contrib/views/slider/src/main/resources/ui/app/views/slider_app/destroy_modal_footer_view.js

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+App.DestroyAppPopupFooterView = Ember.View.extend({
+
+  /**
+   * targetObject should be defined for buttons and other components that may set actions
+   * @type {Em.Controller}
+   */
+  targetObjectBinding: 'controller',
+
+  templateName: 'slider_app/destroy/destroy_popup_footer',
+
+  /**
+   * Destroy-button
+   * @type {Em.Object}
+   */
+  destroyButton: Em.Object.create({title: Em.I18n.t('common.destroy'), clicked: "modalConfirmed", type:'success'}),
+
+  /**
+   * Cancel-button
+   * @type {Em.Object}
+   */
+  cancelButton: Em.Object.create({title: Em.I18n.t('common.cancel'), clicked: "modalCanceled"})
+
+});

+ 37 - 0
contrib/views/slider/src/main/resources/ui/app/views/slider_app/destroy_popup_view.js

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+App.DestroyAppPopupView = Ember.View.extend({
+
+  templateName: 'slider_app/destroy/destroy_popup',
+
+  /**
+   * targetObject should be defined for buttons and other components that may set actions
+   * @type {Em.Controller}
+   */
+  targetObjectBinding: 'controller',
+
+  /**
+   * Warning message for dummy user
+   * @type {string}
+   */
+  confirmMessage: function() {
+    return Em.I18n.t('sliderApp.destroy.confirm.body2').format(this.get('controller.model.name'));
+  }.property()
+
+});

+ 6 - 1
contrib/views/slider/src/main/resources/view.xml

@@ -20,7 +20,7 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
   <version>1.0.0</version>
   <version>1.0.0</version>
   <parameter>
   <parameter>
     <name>hdfs.address</name>
     <name>hdfs.address</name>
-    <description>The URL to access HDFS service via its protocol. Typically this is the fs.defaultFS property in the core-site.xml configuration. For example: hdfs://hdfs.namenode.host:8020.</description>
+    <description>The URL to access HDFS service via its protocol. Typically this is the fs.defaultFS property in the core-site.xml configuration file. For example: hdfs://hdfs.namenode.host:8020.</description>
     <required>true</required>
     <required>true</required>
   </parameter>
   </parameter>
   <parameter>
   <parameter>
@@ -28,6 +28,11 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
     <description>The URL to the YARN ResourceManager, used to provide YARN Application data. For example: http://yarn.resourcemanager.host:8050</description>
     <description>The URL to the YARN ResourceManager, used to provide YARN Application data. For example: http://yarn.resourcemanager.host:8050</description>
     <required>true</required>
     <required>true</required>
   </parameter>
   </parameter>
+  <parameter>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <description>The URL to the YARN ResourceManager Web Application, used to provide YARN UI. Typically this is the yarn.resourcemanager.webapp.address config from yarn-site.xml configuration file. For example: http://yarn.resourcemanager.host:8088</description>
+    <required>true</required>
+  </parameter>
   <parameter>
   <parameter>
     <name>yarn.resourcemanager.scheduler.address</name>
     <name>yarn.resourcemanager.scheduler.address</name>
     <description>The URL to the YARN ResourceManager Scheduler, which schedules YARN Applications. For example: http://yarn.resourcemanager.host:8030</description>
     <description>The URL to the YARN ResourceManager Scheduler, which schedules YARN Applications. For example: http://yarn.resourcemanager.host:8030</description>