
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714

Jayush Luniya 8 years ago
Parent
Commit
045d9bfe3e
100 changed files with 3803 additions and 898 deletions
  1. +54 -14 ambari-common/src/main/python/resource_management/libraries/script/script.py
  2. +7 -4 ambari-logsearch/ambari-logsearch-web/pom.xml
  3. +17 -10 ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java
  4. +10 -0 ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
  5. +4 -2 ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  6. +1 -1 ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
  7. +1 -8 ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
  8. +152 -92 ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
  9. +15 -0 ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
  10. +60 -21 ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
  11. +2 -0 ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
  12. +1 -1 ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java
  13. +11 -0 ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
  14. +1 -0 ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
  15. +1 -1 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java
  16. +1 -1 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java
  17. +57 -14 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
  18. +1 -2 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
  19. +4 -2 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java
  20. +109 -33 ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java
  21. +54 -3 ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
  22. +32 -1 ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
  23. +156 -8 ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
  24. +29 -0 ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
  25. +52 -0 ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java
  26. +71 -0 ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java
  27. +10 -0 ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
  28. +18 -6 ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
  29. +25 -0 ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
  30. +4 -14 ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java
  31. +160 -0 ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
  32. +10 -0 ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
  33. +12 -0 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
  34. +3 -0 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
  35. +12 -1 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
  36. +57 -5 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
  37. +5 -0 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
  38. +21 -0 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
  39. +0 -6 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py
  40. +19 -2 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
  41. +0 -2 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  42. +10 -0 ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
  43. +6 -0 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
  44. +12 -1 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
  45. +57 -5 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
  46. +5 -0 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
  47. +20 -0 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
  48. +0 -6 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py
  49. +19 -2 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
  50. +0 -2 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
  51. +10 -0 ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
  52. +4 -2 ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
  53. +0 -3 ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
  54. +3 -2 ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
  55. +0 -3 ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
  56. +21 -12 ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py
  57. +12 -0 ambari-server/src/main/resources/configuration-schema.xsd
  58. +1 -0 ambari-server/src/main/resources/properties.json
  59. +3 -0 ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml
  60. +0 -30 ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
  61. +0 -5 ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
  62. +32 -0 ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
  63. +4 -4 ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
  64. +7 -7 ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
  65. +28 -3 ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
  66. +74 -2 ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
  67. +20 -0 ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java
  68. +58 -2 ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
  69. +145 -5 ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java
  70. +2 -0 ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  71. +17 -0 ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
  72. +33 -0 ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  73. +24 -77 ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py
  74. +3 -1 ambari-server/src/test/python/stacks/utils/RMFTestCase.py
  75. +1 -1 ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
  76. +1 -1 ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
  77. +8 -0 ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
  78. +22 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
  79. +63 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
  80. +26 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
  81. +145 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
  82. +223 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
  83. +137 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
  84. +199 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
  85. +396 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
  86. +30 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
  87. +20 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
  88. +26 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
  89. +23 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
  90. +26 -0 ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
  91. +2 -0 ambari-web/app/assets/test/tests.js
  92. +1 -0 ambari-web/app/controllers.js
  93. +468 -0 ambari-web/app/controllers/main/service/info/metric.js
  94. +1 -448 ambari-web/app/controllers/main/service/info/summary.js
  95. +1 -1 ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js
  96. +1 -0 ambari-web/app/messages.js
  97. +1 -0 ambari-web/app/styles/common.less
  98. +25 -1 ambari-web/app/styles/enhanced_service_dashboard.less
  99. +46 -18 ambari-web/app/styles/theme/bootstrap-ambari.css
  100. +22 -0 ambari-web/app/styles/top-nav.less

+ 54 - 14
ambari-common/src/main/python/resource_management/libraries/script/script.py

@@ -501,6 +501,7 @@ class Script(object):
       Script.stack_version_from_distro_select = pkg_provider.get_installed_package_version(
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
+
     return Script.stack_version_from_distro_select
 
 
@@ -525,22 +526,20 @@ class Script(object):
     """
     This function replaces ${stack_version} placeholder with actual version.  If the package
     version is passed from the server, use that as an absolute truth.
-    
+
     :param name name of the package
     :param repo_version actual version of the repo currently installing
     """
-    stack_version_package_formatted = ""
+    if not STACK_VERSION_PLACEHOLDER in name:
+      return name
 
-    if not repo_version:
-      repo_version = self.get_stack_version_before_packages_installed()
+    stack_version_package_formatted = ""
 
     package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'
 
     # repositoryFile is the truth
     # package_version should be made to the form W_X_Y_Z_nnnn
     package_version = default("repositoryFile/repoVersion", None)
-    if package_version is not None:
-      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
 
     # TODO remove legacy checks
     if package_version is None:
@@ -550,6 +549,17 @@ class Script(object):
     if package_version is None:
       package_version = default("hostLevelParams/package_version", None)
 
+    if (package_version is None or '-' not in package_version) and default('/repositoryFile', None):
+      self.load_available_packages()
+      package_name = self.get_package_from_available(name, self.available_packages_in_repos)
+      if package_name is None:
+        raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
+      return package_name
+
+    if package_version is not None:
+      package_version = package_version.replace('.', package_delimiter).replace('-', package_delimiter)
+
     # The cluster effective version comes down when the version is known after the initial
     # install.  In that case we should not be guessing which version when invoking INSTALL, but
     # use the supplied version to build the package_version
@@ -568,6 +578,7 @@ class Script(object):
 
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we try to specify it.
     if not package_version or '*' in package_version:
+      repo_version = self.get_stack_version_before_packages_installed()
       stack_version_package_formatted = repo_version.replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
@@ -760,6 +771,19 @@ class Script(object):
     """
     self.install_packages(env)
 
+  def load_available_packages(self):
+    if self.available_packages_in_repos:
+      return self.available_packages_in_repos
+
+
+    pkg_provider = get_provider("Package")   
+    try:
+      self.available_packages_in_repos = pkg_provider.get_available_packages_in_repos(self.get_config()['repositoryFile']['repositories'])
+    except Exception as err:
+      Logger.exception("Unable to load available packages")
+      self.available_packages_in_repos = []
+
+
   def install_packages(self, env):
     """
    List of packages that are required by the service is received from the server
@@ -782,17 +806,11 @@ class Script(object):
       package_list_str = config['hostLevelParams']['package_list']
       agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
       agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
-      pkg_provider = get_provider("Package")
-      try:
-        available_packages_in_repos = pkg_provider.get_available_packages_in_repos(config['repositoryFile']['repositories'])
-      except Exception as err:
-        Logger.exception("Unable to load available packages")
-        available_packages_in_repos = []
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
           if self.check_package_condition(package):
-            name = self.get_package_from_available(package['name'], available_packages_in_repos)
+            name = self.format_package_name(package['name'])
             # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
             # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
             # <osFamily>any<osFamily> which would cause installation failure on Windows.
@@ -988,12 +1006,33 @@ class Script(object):
 
   def configure(self, env, upgrade_type=None, config_dir=None):
     """
-    To be overridden by subclasses
+    To be overridden by subclasses (may invoke save_configs)
     :param upgrade_type: only valid during RU/EU, otherwise will be None
     :param config_dir: for some clients during RU, the location to save configs to, otherwise None
     """
     self.fail_with_error('configure method isn\'t implemented')
 
+  def save_configs(self, env):
+    """
+    To be overridden by subclasses
+    Creates / updates configuration files
+    """
+    self.fail_with_error('save_configs method isn\'t implemented')
+
+  def reconfigure(self, env):
+    """
+    Default implementation of RECONFIGURE action which may be overridden by subclasses
+    """
+    Logger.info("Refresh config files ...")
+    self.save_configs(env)
+
+    config = self.get_config()
+    if "reconfigureAction" in config["commandParams"] and config["commandParams"]["reconfigureAction"] is not None:
+      reconfigure_action = config["commandParams"]["reconfigureAction"]
+      Logger.info("Call %s" % reconfigure_action)
+      method = self.choose_method_to_execute(reconfigure_action)
+      method(env)
+
   def generate_configs_get_template_file_content(self, filename, dicts):
     config = self.get_config()
     content = ''
@@ -1092,5 +1131,6 @@ class Script(object):
 
 
   def __init__(self):
+    self.available_packages_in_repos = []
     if Script.instance is not None:
       raise Fail("An instantiation already exists! Use, get_instance() method.")

+ 7 - 4
ambari-logsearch/ambari-logsearch-web/pom.xml

@@ -69,13 +69,16 @@
             </configuration>
           </execution>
           <execution>
-            <id>generate dist</id>
-            <phase>generate-resources</phase>
+            <id>webpack build</id>
             <goals>
-             <goal>yarn</goal>
+              <goal>webpack</goal>
             </goals>
+            <!-- optional: the default phase is "generate-resources" -->
+            <phase>generate-resources</phase>
             <configuration>
-              <arguments>build-prod</arguments>
+              <!-- optional: if not specified, it will run webpack's default
+              build (and you can remove this whole <configuration> section.) -->
+              <arguments>-p</arguments>
             </configuration>
           </execution>
           <execution>

+ 17 - 10
ambari-metrics/ambari-metrics-flume-sink/src/test/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSinkTest.java

@@ -18,17 +18,21 @@
 
 package org.apache.hadoop.metrics2.sink.flume;
 
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.powermock.api.easymock.PowerMock.mockStatic;
 import static org.powermock.api.easymock.PowerMock.replay;
-import static org.powermock.api.easymock.PowerMock.replayAll;
 import static org.powermock.api.easymock.PowerMock.resetAll;
 import static org.powermock.api.easymock.PowerMock.verifyAll;
 
 import java.net.InetAddress;
 import java.util.Collections;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.flume.Context;
 import org.apache.flume.instrumentation.util.JMXPollUtil;
@@ -43,7 +47,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest(JMXPollUtil.class)
+@PrepareForTest({JMXPollUtil.class, Executors.class, FlumeTimelineMetricsSink.class})
 public class FlumeTimelineMetricsSinkTest {
   @Test
   public void testNonNumericMetricMetricExclusion() throws InterruptedException {
@@ -76,7 +80,7 @@ public class FlumeTimelineMetricsSinkTest {
     flumeTimelineMetricsSink.setMetricsCaches(Collections.singletonMap("SINK",timelineMetricsCache));
     EasyMock.expect(timelineMetricsCache.getTimelineMetric("key1"))
         .andReturn(new TimelineMetric()).once();
-    timelineMetricsCache.putTimelineMetric(EasyMock.anyObject(TimelineMetric.class));
+    timelineMetricsCache.putTimelineMetric(anyObject(TimelineMetric.class));
     EasyMock.expectLastCall().once();
     return timelineMetricsCache;
   }
@@ -86,15 +90,18 @@ public class FlumeTimelineMetricsSinkTest {
     FlumeTimelineMetricsSink flumeTimelineMetricsSink = new FlumeTimelineMetricsSink();
     TimelineMetricsCache timelineMetricsCache = getTimelineMetricsCache(flumeTimelineMetricsSink);
     flumeTimelineMetricsSink.setPollFrequency(1);
-    mockStatic(JMXPollUtil.class);
-    EasyMock.expect(JMXPollUtil.getAllMBeans()).andReturn(
-        Collections.singletonMap("component1", Collections.singletonMap("key1", "42"))).once();
-    flumeTimelineMetricsSink.start();
-    flumeTimelineMetricsSink.stop();
-    replay(JMXPollUtil.class, timelineMetricsCache);
+    mockStatic(Executors.class);
+    ScheduledExecutorService executor = createNiceMock(ScheduledExecutorService.class);
+    expect(Executors.newSingleThreadScheduledExecutor()).andReturn(executor);
+    FlumeTimelineMetricsSink.TimelineMetricsCollector collector = anyObject();
+    TimeUnit unit = anyObject();
+    expect(executor.scheduleWithFixedDelay(collector, eq(0), eq(1), unit)).andReturn(null);
+    executor.shutdown();
+    replay(timelineMetricsCache, Executors.class, executor);
+
     flumeTimelineMetricsSink.start();
-    Thread.sleep(5);
     flumeTimelineMetricsSink.stop();
+
     verifyAll();
   }
 

+ 10 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java

@@ -89,6 +89,7 @@ import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.PropertyInfo.PropertyType;
+import org.apache.ambari.server.state.RefreshCommandConfiguration;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -507,6 +508,15 @@ public class AmbariCustomCommandExecutionHelper {
       StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
       roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
 
+      // set reconfigureAction in case of a RECONFIGURE command if there are any
+      if (commandName.equals("RECONFIGURE")) {
+        String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, hostName, serviceName, componentName);
+        if (refreshConfigsCommand != null && !refreshConfigsCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+              LOG.info("Refreshing configs for {}/{} with command: ", componentName, hostName, refreshConfigsCommand);
+          commandParams.put("reconfigureAction", refreshConfigsCommand);
+        }
+      }
+
       execCmd.setCommandParams(commandParams);
       execCmd.setRoleParams(roleParams);
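
On the agent side, the reconfigureAction parameter set above is consumed by the new Script.reconfigure() default implementation (see the script.py diff): it calls save_configs() and then dispatches the named method. A sketch of a hypothetical component script that supports this flow (the Script base class and execute() harness are Ambari's resource_management; reload_configs is an assumed action name, not something this commit defines):

    from resource_management.libraries.script.script import Script

    class MyDaemon(Script):
        def save_configs(self, env):
            # Write the daemon's config files; invoked by configure()
            # and by the default reconfigure() implementation.
            pass

        def reload_configs(self, env):
            # Extra refresh step; the server selects this method by
            # putting its name into commandParams["reconfigureAction"].
            pass

    if __name__ == "__main__":
        MyDaemon().execute()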
 

+ 4 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -1215,7 +1215,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     try {
       cluster = clusters.getCluster(request.getClusterName());
     } catch (ClusterNotFoundException e) {
-      LOG.error("Cluster not found ", e);
+      LOG.info(e.getMessage());
       throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
     }
 
@@ -4896,7 +4896,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       properties = ambariMetaInfo.getServiceProperties(stackName, stackVersion, serviceName);
     }
     for (PropertyInfo property: properties) {
-      response.add(property.convertToResponse());
+      if (property.shouldBeConfigured()) {
+        response.add(property.convertToResponse());
+      }
     }
 
     return response;

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java

@@ -70,7 +70,7 @@ public class AmbariManagementHelper {
    */
   public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
     validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackManager, stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
     stackManager.linkStackToExtension(stackInfo, extensionInfo);
 

+ 1 - 8
ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java

@@ -442,12 +442,6 @@ public interface KerberosHelper {
    * @param hostFilter             a set of hostname indicating the set of hosts to process -
    *                               if null, no filter is relevant; if empty, the filter
    *                               indicates no relevant hosts
-   * @param identityFilter         a Collection of identity names indicating the relevant
-   *                               identities - if null, no filter is relevant; if empty,
-   *                               the filter indicates no relevant identities
-   * @param shouldProcessCommand   a Command implementation to determine if the relevant component
-   *                               is in a state in which is should be process for the current
-   *                               Kerberos operation.
    * @return a list of ServiceComponentHost instances and should be processed during the relevant
    * Kerberos operation.
    * @throws AmbariException
@@ -455,8 +449,7 @@ public interface KerberosHelper {
   List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
                                                                KerberosDescriptor kerberosDescriptor,
                                                                Map<String, ? extends Collection<String>> serviceComponentFilter,
-                                                               Collection<String> hostFilter, Collection<String> identityFilter,
-                                                               Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+                                                               Collection<String> hostFilter)
       throws AmbariException;
 
   Set<String> getHostsWithValidKerberosClient(Cluster cluster) throws AmbariException;

+ 152 - 92
ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java

@@ -122,6 +122,7 @@ import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -268,10 +269,12 @@ public class KerberosHelperImpl implements KerberosHelper {
               boolean updateConfigurations = !requestProperties.containsKey(DIRECTIVE_IGNORE_CONFIGS)
                   || !"true".equalsIgnoreCase(requestProperties.get(DIRECTIVE_IGNORE_CONFIGS));
 
+              boolean forceAllHosts = (hostFilter == null) || (hostFilter.contains("*"));
+
               if ("true".equalsIgnoreCase(value) || "all".equalsIgnoreCase(value)) {
-                handler = new CreatePrincipalsAndKeytabsHandler(true, updateConfigurations, true);
+                handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.RECREATE_ALL, updateConfigurations, forceAllHosts, true);
               } else if ("missing".equalsIgnoreCase(value)) {
-                handler = new CreatePrincipalsAndKeytabsHandler(false, updateConfigurations, true);
+                handler = new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.CREATE_MISSING, updateConfigurations, forceAllHosts, true);
               }
 
               if (handler != null) {
@@ -326,7 +329,7 @@ public class KerberosHelperImpl implements KerberosHelper {
         if (serviceComponentsArray.length == 2) {
           serviceComponentFilter.put(serviceName, ImmutableSet.copyOf(serviceComponentsArray[1].split(";")));
         } else {
-          serviceComponentFilter.put(serviceName, null);
+          serviceComponentFilter.put(serviceName, ImmutableSet.of("*"));
         }
       }
       return serviceComponentFilter.build();
@@ -340,7 +343,7 @@ public class KerberosHelperImpl implements KerberosHelper {
                                                 RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException {
     return handle(cluster, getKerberosDetails(cluster, manageIdentities), serviceComponentFilter, hostFilter, identityFilter,
-        hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(false, false,
+        hostsToForceKerberosOperations, requestStageContainer, new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false,
             false));
   }
 
@@ -1061,7 +1064,7 @@ public class KerberosHelperImpl implements KerberosHelper {
                                                   RequestStageContainer requestStageContainer)
       throws KerberosOperationException, AmbariException {
     return handleTestIdentity(cluster, getKerberosDetails(cluster, null), commandParamsStage, requestStageContainer,
-        new CreatePrincipalsAndKeytabsHandler(false, false, false));
+        new CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType.DEFAULT, false, false, false));
   }
 
   @Override
@@ -1230,27 +1233,25 @@ public class KerberosHelperImpl implements KerberosHelper {
   public List<ServiceComponentHost> getServiceComponentHostsToProcess(final Cluster cluster,
                                                                       final KerberosDescriptor kerberosDescriptor,
                                                                       final Map<String, ? extends Collection<String>> serviceComponentFilter,
-                                                                      final Collection<String> hostFilter, Collection<String> identityFilter,
-                                                                      final Command<Boolean, ServiceComponentHost> shouldProcessCommand)
+                                                                      final Collection<String> hostFilter)
       throws AmbariException {
     return getServiceComponentHosts(cluster, new Command<Boolean, ServiceComponentHost>() {
       @Override
       public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
         if (sch != null) {
           // Check the host filter
-          if ((hostFilter == null) || hostFilter.contains(sch.getHostName())) {
+          if ((hostFilter == null) || hostFilter.contains("*") || hostFilter.contains(sch.getHostName())) {
             String serviceName = sch.getServiceName();
 
             // Check the service filter
-            if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey(serviceName)) {
+            if ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
               KerberosServiceDescriptor serviceDescriptor = kerberosDescriptor.getService(serviceName);
 
               if (serviceDescriptor != null) {
-                Collection<String> componentFilter = (serviceComponentFilter == null) ? null : serviceComponentFilter.get(serviceName);
+                Collection<String> componentFilter = ((serviceComponentFilter == null) || serviceComponentFilter.containsKey("*")) ? null : serviceComponentFilter.get(serviceName);
 
-                // Check the service/component filter and the shouldProcessCommand
-                return (((componentFilter == null) || componentFilter.contains(sch.getServiceComponentName())) &&
-                    ((shouldProcessCommand == null) || shouldProcessCommand.invoke(sch)));
+                // Check the service/component filter
+                return (((componentFilter == null) || componentFilter.contains("*") || componentFilter.contains(sch.getServiceComponentName())));
               }
             }
           }
@@ -1491,8 +1492,9 @@ public class KerberosHelperImpl implements KerberosHelper {
 
     if (identities != null) {
       for (KerberosIdentityDescriptor identity : identities) {
-        // If there is no filter or the filter contains the current identity's name...
-        if ((identityFilter == null) || identityFilter.contains(identity.getName())) {
+        // If there is no filter or the filter contains the current identity's path...
+        if ((identityFilter == null) || identityFilter.contains(identity.getPath())) {
+
           KerberosPrincipalDescriptor principalDescriptor = identity.getPrincipalDescriptor();
           String principal = null;
           String principalType = null;
@@ -2030,10 +2032,7 @@ public class KerberosHelperImpl implements KerberosHelper {
         cluster,
         kerberosDescriptor,
         serviceComponentFilter,
-        hostFilter,
-        identityFilter,
-        arg -> true);
-
+        hostFilter);
 
     // While iterating over all the ServiceComponentHosts find hosts that have KERBEROS_CLIENT
     // components in the INSTALLED state and add them to the hostsWithValidKerberosClient Set.
@@ -3378,12 +3377,11 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
-    public void addDistributeKeytabFilesStage(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts,
-                                              String clusterHostInfoJson, String hostParamsJson,
-                                              Map<String, String> commandParameters,
-                                              RoleCommandOrder roleCommandOrder,
-                                              RequestStageContainer requestStageContainer,
-                                              Set<String> hostsWithValidKerberosClient)
+    void addDistributeKeytabFilesStage(Cluster cluster, String clusterHostInfoJson,
+                                       String hostParamsJson, Map<String, String> commandParameters,
+                                       RoleCommandOrder roleCommandOrder,
+                                       RequestStageContainer requestStageContainer,
+                                       List<String> hosts)
         throws AmbariException {
 
       Stage stage = createNewStage(requestStageContainer.getLastStageId(),
@@ -3393,20 +3391,13 @@ public class KerberosHelperImpl implements KerberosHelper {
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
-      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
-          new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
-
-      if (!filteredComponents.isEmpty()) {
-        List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      if (!hosts.isEmpty()) {
         Map<String, String> requestParams = new HashMap<>();
-        List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
-        RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
-        requestResourceFilters.add(reqResFilter);
 
         ActionExecutionContext actionExecContext = new ActionExecutionContext(
             cluster.getClusterName(),
             SET_KEYTAB,
-            requestResourceFilters,
+            createRequestResourceFilters(hosts),
             requestParams);
         customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage,
             requestParams, null);
@@ -3422,7 +3413,12 @@ public class KerberosHelperImpl implements KerberosHelper {
     /**
      * Send a custom command to the KERBEROS_CLIENT to check if there are missing keytabs on each hosts.
      */
-    public void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson, String hostParamsJson, ServiceComponentHostServerActionEvent event, Map<String, String> commandParameters, RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer, List<ServiceComponentHost> serviceComponentHosts) throws AmbariException {
+    void addCheckMissingKeytabsStage(Cluster cluster, String clusterHostInfoJson,
+                                     String hostParamsJson, Map<String, String> commandParameters,
+                                     RoleCommandOrder roleCommandOrder,
+                                     RequestStageContainer requestStageContainer,
+                                     List<String> hostsToInclude)
+        throws AmbariException {
       Stage stage = createNewStage(requestStageContainer.getLastStageId(),
           cluster,
           requestStageContainer.getId(),
@@ -3430,20 +3426,13 @@ public class KerberosHelperImpl implements KerberosHelper {
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
-      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
-          new ArrayList<>(serviceComponentHosts), getHostsWithValidKerberosClient(cluster));
-
-      if (!filteredComponents.isEmpty()) {
-        List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      if (!hostsToInclude.isEmpty()) {
         Map<String, String> requestParams = new HashMap<>();
-        List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
-        RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
-        requestResourceFilters.add(reqResFilter);
 
         ActionExecutionContext actionExecContext = new ActionExecutionContext(
           cluster.getClusterName(),
           CHECK_KEYTABS,
-          requestResourceFilters,
+          createRequestResourceFilters(hostsToInclude),
           requestParams);
         customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
       }
@@ -3454,32 +3443,6 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
-    /**
-     * Filter out ServiceComponentHosts that are on on hosts in the specified set of host names.
-     * <p/>
-     * It is expected that the supplied collection is modifiable. It will be modified inplace.
-     *
-     * @param serviceComponentHosts a collection of ServiceComponentHost items to test
-     * @param hosts                 a set of host names indicating valid hosts
-     * @return a collection of filtered ServiceComponentHost items
-     */
-    private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
-                                                                                 Set<String> hosts) {
-
-      if ((serviceComponentHosts != null) && (hosts != null)) {
-        Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
-        while (iterator.hasNext()) {
-          ServiceComponentHost sch = iterator.next();
-
-          if (!hosts.contains(sch.getHostName())) {
-            iterator.remove();
-          }
-        }
-      }
-
-      return serviceComponentHosts;
-    }
-
     void addDisableSecurityHookStage(Cluster cluster,
                                      String clusterHostInfoJson,
                                      String hostParamsJson,
@@ -3677,6 +3640,13 @@ public class KerberosHelperImpl implements KerberosHelper {
       requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
+
+    private List<RequestResourceFilter> createRequestResourceFilters(List<String> hostsToInclude) {
+      List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+      RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToInclude);
+      requestResourceFilters.add(reqResFilter);
+      return requestResourceFilters;
+    }
   }
 
   /**
@@ -3746,6 +3716,8 @@ public class KerberosHelperImpl implements KerberosHelper {
           roleCommandOrder, requestStageContainer);
 
       if (kerberosDetails.manageIdentities()) {
+        List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, false);
+
         commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
 
         // *****************************************************************
@@ -3767,8 +3739,8 @@ public class KerberosHelperImpl implements KerberosHelper {
 
         // *****************************************************************
         // Create stage to distribute keytabs
-        addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson, hostParamsJson,
-            commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+        addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+            roleCommandOrder, requestStageContainer, hostsToInclude);
       }
 
       // *****************************************************************
@@ -3885,10 +3857,11 @@ public class KerberosHelperImpl implements KerberosHelper {
    */
   private class CreatePrincipalsAndKeytabsHandler extends Handler {
     /**
-     * A boolean value indicating whether to create keytabs for all principals (<code>true</code>)
-     * or only the ones that are missing (<code>false</code>).
+     * The type of Kerberos operation being performed.
+     *
+     * @see org.apache.ambari.server.serveraction.kerberos.KerberosServerAction.OperationType
      */
-    private boolean regenerateAllKeytabs;
+    private KerberosServerAction.OperationType operationType;
 
     /**
      * A boolean value indicating whether to update service configurations (<code>true</code>)
@@ -3896,6 +3869,14 @@ public class KerberosHelperImpl implements KerberosHelper {
      */
     private boolean updateConfigurations;
 
+    /**
+     * A boolean value indicating whether to include all hosts (<code>true</code>) when setting up
+     * agent-side tasks or to select only the hosts found to be relevant (<code>false</code>).
+     * <p>
+     * This is useful if we do not know beforehand which hosts need to be involved in the operation.
+     */
+    private boolean forceAllHosts;
+
     /**
      * A boolean value indicating whether to include Ambari server identity (<code>true</code>)
      * or ignore it (<code>false</code>).
@@ -3906,17 +3887,20 @@ public class KerberosHelperImpl implements KerberosHelper {
      * CreatePrincipalsAndKeytabsHandler constructor to set whether this instance should be used to
      * regenerate all keytabs or just the ones that have not been distributed
      *
-     * @param regenerateAllKeytabs A boolean value indicating whether to create keytabs for all
-     *                             principals (<code>true</code> or only the ones that are missing
-     *                             (<code>false</code>)
-     * @param updateConfigurations A boolean value indicating whether to update service configurations
-     *                             (<code>true</code>) or ignore any potential configuration changes
-     *                             (<code>false</code>)
+     * @param operationType         The type of Kerberos operation being performed
+     * @param updateConfigurations  A boolean value indicating whether to update service configurations
+     *                              (<code>true</code>) or ignore any potential configuration changes
+     * @param forceAllHosts         A boolean value indicating whether to include all hosts (<code>true</code>)
+     *                              when setting up agent-side tasks or to select only the hosts found to be
+     *                              relevant (<code>false</code>)
+     * @param includeAmbariIdentity A boolean value indicating whether to include Ambari server
+     *                              identity (<code>true</code>) or ignore it (<code>false</code>)
      */
-    public CreatePrincipalsAndKeytabsHandler(boolean regenerateAllKeytabs, boolean updateConfigurations,
-                                             boolean includeAmbariIdentity) {
-      this.regenerateAllKeytabs = regenerateAllKeytabs;
+    CreatePrincipalsAndKeytabsHandler(KerberosServerAction.OperationType operationType, boolean updateConfigurations,
+                                      boolean forceAllHosts, boolean includeAmbariIdentity) {
+      this.operationType = operationType;
       this.updateConfigurations = updateConfigurations;
+      this.forceAllHosts = forceAllHosts;
       this.includeAmbariIdentity = includeAmbariIdentity;
     }
 
@@ -3947,6 +3931,7 @@ public class KerberosHelperImpl implements KerberosHelper {
       }
 
 
+      boolean processAmbariIdentity = includeAmbariIdentity;
       Map<String, String> commandParameters = new HashMap<>();
       commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, ambariManagementController.getAuthName());
       commandParameters.put(KerberosServerAction.DEFAULT_REALM, kerberosDetails.getDefaultRealm());
@@ -3955,22 +3940,29 @@ public class KerberosHelperImpl implements KerberosHelper {
       }
       if (serviceComponentFilter != null) {
         commandParameters.put(KerberosServerAction.SERVICE_COMPONENT_FILTER, StageUtils.getGson().toJson(serviceComponentFilter));
+
+        processAmbariIdentity = serviceComponentFilter.containsKey("AMBARI") &&
+            ((serviceComponentFilter.get("AMBARI") == null) || serviceComponentFilter.get("AMBARI").contains("*") || serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER"));
       }
       if (hostFilter != null) {
         commandParameters.put(KerberosServerAction.HOST_FILTER, StageUtils.getGson().toJson(hostFilter));
+
+        processAmbariIdentity = hostFilter.contains("*") || hostFilter.contains(StageUtils.getHostName());
       }
       if (identityFilter != null) {
         commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identityFilter));
       }
 
-      commandParameters.put(KerberosServerAction.REGENERATE_ALL, (regenerateAllKeytabs) ? "true" : "false");
-      commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (includeAmbariIdentity) ? "true" : "false");
+      commandParameters.put(KerberosServerAction.OPERATION_TYPE, (operationType == null) ? KerberosServerAction.OperationType.DEFAULT.name() : operationType.name());
+      commandParameters.put(KerberosServerAction.INCLUDE_AMBARI_IDENTITY, (processAmbariIdentity) ? "true" : "false");
 
       if (updateConfigurations) {
         commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATION_NOTE, "Updated Kerberos-related configurations");
         commandParameters.put(KerberosServerAction.UPDATE_CONFIGURATIONS, "true");
       }
 
+      List<String> hostsToInclude = calculateHosts(cluster, serviceComponentHosts, hostsWithValidKerberosClient, forceAllHosts);
+
       // *****************************************************************
       // Create stage to create principals
       addPrepareKerberosIdentitiesStage(cluster, clusterHostInfoJson, hostParamsJson, event,
@@ -3979,9 +3971,9 @@ public class KerberosHelperImpl implements KerberosHelper {
       if (kerberosDetails.manageIdentities()) {
         commandParameters.put(KerberosServerAction.KDC_TYPE, kerberosDetails.getKdcType().name());
 
-        if (!regenerateAllKeytabs) {
-          addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson, event,
-              commandParameters, roleCommandOrder, requestStageContainer, serviceComponentHosts);
+        if (operationType != KerberosServerAction.OperationType.RECREATE_ALL) {
+          addCheckMissingKeytabsStage(cluster, clusterHostInfoJson, hostParamsJson,
+              commandParameters, roleCommandOrder, requestStageContainer, hostsToInclude);
         }
 
         // *****************************************************************
@@ -3996,15 +3988,15 @@ public class KerberosHelperImpl implements KerberosHelper {
 
         // *****************************************************************
         // Create stage to distribute and configure keytab for Ambari server and configure JAAS
-        if (includeAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
+        if (processAmbariIdentity && kerberosDetails.createAmbariPrincipal()) {
           addConfigureAmbariIdentityStage(cluster, clusterHostInfoJson, hostParamsJson, event, commandParameters,
               roleCommandOrder, requestStageContainer);
         }
 
         // *****************************************************************
         // Create stage to distribute keytabs
-        addDistributeKeytabFilesStage(cluster, serviceComponentHosts, clusterHostInfoJson,
-            hostParamsJson, commandParameters, roleCommandOrder, requestStageContainer, hostsWithValidKerberosClient);
+        addDistributeKeytabFilesStage(cluster, clusterHostInfoJson, hostParamsJson, commandParameters,
+            roleCommandOrder, requestStageContainer, hostsToInclude);
       }
 
       if (updateConfigurations) {
@@ -4018,6 +4010,74 @@ public class KerberosHelperImpl implements KerberosHelper {
     }
   }
 
+  /**
+   * Filter out ServiceComponentHosts that are not on hosts in the specified set of host names.
+   * <p/>
+   * It is expected that the supplied collection is modifiable. It will be modified in place.
+   *
+   * @param serviceComponentHosts a collection of ServiceComponentHost items to test
+   * @param hosts                 a set of host names indicating valid hosts
+   * @return a collection of filtered ServiceComponentHost items
+   */
+  private Collection<ServiceComponentHost> filterServiceComponentHostsForHosts(Collection<ServiceComponentHost> serviceComponentHosts,
+                                                                               Set<String> hosts) {
+
+    if ((serviceComponentHosts != null) && (hosts != null)) {
+      Iterator<ServiceComponentHost> iterator = serviceComponentHosts.iterator();
+      while (iterator.hasNext()) {
+        ServiceComponentHost sch = iterator.next();
+
+        if (!hosts.contains(sch.getHostName())) {
+          iterator.remove();
+        }
+      }
+    }
+
+    return serviceComponentHosts;
+  }
+
+  /**
+   * Calculate the hosts to include when issuing agent-side commands.
+   * <p>
+   * If forcing all hosts, select only the healthy hosts in the cluster; otherwise select only the healthy
+   * hosts from the set of hosts specified in the collection of relevant {@link ServiceComponentHost}s.
+   *
+   * @param cluster                      the cluster
+   * @param serviceComponentHosts        a collection of {@link ServiceComponentHost}s that are
+   *                                     relevant to the current operation
+   * @param hostsWithValidKerberosClient the collection of hosts known to have the Kerberos client
+   *                                     component installed
+   * @param forceAllHosts                true to process all hosts from the cluster rather than use
+   *                                     the hosts parsed from the set of {@link ServiceComponentHost}s
+   * @return a filtered list of host names
+   * @throws AmbariException
+   */
+  private List<String> calculateHosts(Cluster cluster, List<ServiceComponentHost> serviceComponentHosts, Set<String> hostsWithValidKerberosClient, boolean forceAllHosts) throws AmbariException {
+    if(forceAllHosts) {
+      List<String> hosts = new ArrayList<>();
+      Collection<Host> clusterHosts = cluster.getHosts();
+      if(!CollectionUtils.isEmpty(clusterHosts)) {
+        for(Host host: clusterHosts) {
+          if(host.getState() == HostState.HEALTHY) {
+            hosts.add(host.getHostName());
+          }
+        }
+      }
+
+      return hosts;
+    }
+    else {
+      Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
+          new ArrayList<>(serviceComponentHosts), hostsWithValidKerberosClient);
+
+      if (filteredComponents.isEmpty()) {
+        return Collections.emptyList();
+      } else {
+        return createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+      }
+    }
+  }
+
   /**
    * DeletePrincipalsAndKeytabsHandler is an implementation of the Handler interface used to delete
    * principals and keytabs throughout the cluster.
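
The filter changes in this file replace a null component set with an explicit "*" wildcard and match identities by path rather than by name. A compact sketch of the resulting host/service/component matching (Python for brevity; the real logic lives in getServiceComponentHostsToProcess() and also consults the Kerberos descriptor, which is omitted here):

    def should_process(host, service, component, host_filter, service_component_filter):
        # Host filter: None means "no filter"; "*" matches every host.
        if host_filter is not None and "*" not in host_filter and host not in host_filter:
            return False
        # Service/component filter: None or a "*" key means "no filter".
        if service_component_filter is None or "*" in service_component_filter:
            return True
        if service not in service_component_filter:
            return False
        components = service_component_filter[service]
        return components is None or "*" in components or component in components

    assert should_process("c6401", "HDFS", "NAMENODE", ["*"], {"HDFS": {"*"}})
    assert not should_process("c6401", "HDFS", "NAMENODE", ["c6402"], None)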

+ 15 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java

@@ -46,6 +46,7 @@ public class ServiceComponentHostResponse {
   private String desiredRepositoryVersion;
   private String desiredState;
   private boolean staleConfig = false;
+  private boolean reloadConfig = false;
   private String adminState = null;
   private String maintenanceState = null;
   private UpgradeState upgradeState = UpgradeState.NONE;
@@ -394,6 +395,20 @@ public class ServiceComponentHostResponse {
     staleConfig = stale;
   }
 
+  /**
+   * @return true if configs are reloadable without RESTART command
+   */
+  public boolean isReloadConfig() {
+    return reloadConfig;
+  }
+
+  /**
+   * @param reloadConfig true if configs can be reloaded without a RESTART command
+   */
+  public void setReloadConfig(boolean reloadConfig) {
+    this.reloadConfig = reloadConfig;
+  }
+
   /**
    * @return the maintenance state
    */

+ 60 - 21
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -32,6 +32,7 @@ import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -1367,11 +1368,56 @@ public class BlueprintConfigurationProcessor {
                                              ClusterTopology topology);
   }
 
+  private static class HostGroupUpdater implements PropertyUpdater {
+
+    public static final PropertyUpdater INSTANCE = new HostGroupUpdater();
+
+    @Override
+    public String updateForClusterCreate(String propertyName,
+      String origValue,
+      Map<String, Map<String, String>> properties,
+      ClusterTopology topology) {
+
+      //todo: getHostStrings
+      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+      if (m.find()) {
+        String hostGroupName = m.group(1);
+
+        HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
+        if (groupInfo == null) {
+          //todo: this should be validated in configuration validation
+          throw new RuntimeException(
+            "Encountered a host group token in configuration which couldn't be matched to a host group: "
+              + hostGroupName);
+        }
+
+        //todo: warn if > hosts
+        return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+      }
+
+      return origValue;
+    }
+
+    @Override
+    public Collection<String> getRequiredHostGroups(String propertyName,
+      String origValue,
+      Map<String, Map<String, String>> properties,
+      ClusterTopology topology) {
+      //todo: getHostStrings
+      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
+      if (m.find()) {
+        String hostGroupName = m.group(1);
+        return Collections.singleton(hostGroupName);
+      }
+      return Collections.emptySet();
+    }
+  }
+
   /**
    * Topology based updater which replaces the original host name of a property with the host name
    * which runs the associated (master) component in the new cluster.
    */
-  private static class SingleHostTopologyUpdater implements PropertyUpdater {
+  private static class SingleHostTopologyUpdater extends HostGroupUpdater {
     /**
      * Component name
      */
@@ -1402,21 +1448,9 @@ public class BlueprintConfigurationProcessor {
                                          Map<String, Map<String, String>> properties,
                                          ClusterTopology topology)  {
 
-      //todo: getHostStrings
-      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
-      if (m.find()) {
-        String hostGroupName = m.group(1);
-
-        HostGroupInfo groupInfo = topology.getHostGroupInfo().get(hostGroupName);
-        if (groupInfo == null) {
-          //todo: this should be validated in configuration validation
-          throw new RuntimeException(
-              "Encountered a host group token in configuration which couldn't be matched to a host group: "
-              + hostGroupName);
-        }
-
-        //todo: warn if > hosts
-        return origValue.replace(m.group(0), groupInfo.getHostNames().iterator().next());
+      String replacedValue = super.updateForClusterCreate(propertyName, origValue, properties, topology);
+      if (!Objects.equals(origValue, replacedValue)) {
+        return replacedValue;
       } else {
         int matchingGroupCount = topology.getHostGroupsForComponent(component).size();
         if (matchingGroupCount == 1) {
@@ -1525,11 +1559,9 @@ public class BlueprintConfigurationProcessor {
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
                                                     ClusterTopology topology) {
-      //todo: getHostStrings
-      Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
-      if (m.find()) {
-        String hostGroupName = m.group(1);
-        return Collections.singleton(hostGroupName);
+      Collection<String> result = super.getRequiredHostGroups(propertyName, origValue, properties, topology);
+      if (!result.isEmpty()) {
+        return result;
       } else {
         Collection<String> matchingGroups = topology.getHostGroupsForComponent(component);
         int matchingGroupCount = matchingGroups.size();
@@ -2351,6 +2383,7 @@ public class BlueprintConfigurationProcessor {
     allUpdaters.add(nonTopologyUpdaters);
 
     Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> druidCommon = new HashMap<>();
     Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
     Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
     Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
@@ -2404,6 +2437,7 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();
 
     singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
+    singleHostTopologyUpdaters.put("druid-common", druidCommon);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
     singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap);
     singleHostTopologyUpdaters.put("core-site", coreSiteMap);
@@ -2775,6 +2809,11 @@ public class BlueprintConfigurationProcessor {
         }
       }
     });
+
+    // DRUID
+    druidCommon.put("metastore_hostname", HostGroupUpdater.INSTANCE);
+    druidCommon.put("druid.metadata.storage.connector.connectURI", HostGroupUpdater.INSTANCE);
+    druidCommon.put("druid.zk.service.host", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
   }
 
   private static void addUnitPropertyUpdaters() {

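The extracted HostGroupUpdater resolves %HOSTGROUP::name% tokens against the cluster topology. A minimal standalone sketch of that substitution, assuming the token format used by Ambari blueprints (the real pattern lives in HostGroup.HOSTGROUP_REGEX, and the host names below are illustrative):

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HostGroupTokenSketch {
  // Illustrative stand-in for HostGroup.HOSTGROUP_REGEX
  private static final Pattern HOSTGROUP_TOKEN = Pattern.compile("%HOSTGROUP::(\\S+?)%");

  static String updateForClusterCreate(String origValue, Map<String, String> groupToHost) {
    Matcher m = HOSTGROUP_TOKEN.matcher(origValue);
    if (m.find()) {
      String host = groupToHost.get(m.group(1));
      if (host == null) {
        // mirrors the RuntimeException thrown when no host group matches the token
        throw new RuntimeException("Unmatched host group token: " + m.group(1));
      }
      return origValue.replace(m.group(0), host);
    }
    return origValue; // no token: the value is returned unchanged
  }

  public static void main(String[] args) {
    Map<String, String> topology = Map.of("master", "c6401.ambari.apache.org");
    System.out.println(updateForClusterCreate(
        "jdbc:mysql://%HOSTGROUP::master%:3306/druid", topology));
    // prints: jdbc:mysql://c6401.ambari.apache.org:3306/druid
  }
}
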
+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java

@@ -102,6 +102,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_repository_version";
   public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "actual_configs";
   public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stale_configs";
+  public static final String HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "reload_configs");
   public static final String HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "desired_admin_state";
   public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "maintenance_state";
   public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "upgrade_state";
@@ -264,6 +265,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, response.getDesiredStackVersion(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID, response.getActualConfigs(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID, response.isStaleConfig(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_RELOAD_CONFIGS_PROPERTY_ID, response.isReloadConfig(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, response.getUpgradeState(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, response.getDesiredRepositoryVersion(), requestedIds);
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/RemovableIdentities.java

@@ -133,7 +133,7 @@ public class RemovableIdentities {
    * Remove all identities which are not used by other services or components
    */
   public void remove(KerberosHelper kerberosHelper) throws AmbariException, KerberosOperationException {
-    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getName).collect(toSet());
+    Set<String> identitiesToRemove = skipUsed().stream().map(KerberosIdentityDescriptor::getPath).collect(toSet());
     if (!identitiesToRemove.isEmpty()) {
       kerberosHelper.deleteIdentities(cluster, components, identitiesToRemove);
     }

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java

@@ -48,6 +48,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.StackId;
 import org.slf4j.Logger;
@@ -115,6 +116,10 @@ public class HostVersionOutOfSyncListener {
       List<HostVersionEntity> hostVersionEntities =
           hostVersionDAO.get().findByClusterAndHost(cluster.getClusterName(), event.getHostName());
 
+      Service service = cluster.getService(event.getServiceName());
+      ServiceComponent serviceComponent = service.getServiceComponent(event.getComponentName());
+      RepositoryVersionEntity componentRepo = serviceComponent.getDesiredRepositoryVersion();
+
       for (HostVersionEntity hostVersionEntity : hostVersionEntities) {
         StackEntity hostStackEntity = hostVersionEntity.getRepositoryVersion().getStack();
         StackId hostStackId = new StackId(hostStackEntity);
@@ -138,6 +143,12 @@ public class HostVersionOutOfSyncListener {
           continue;
         }
 
+        // !!! we shouldn't be changing other versions to OUT_OF_SYNC if the event
+        // component repository doesn't match
+        if (!hostVersionEntity.getRepositoryVersion().equals(componentRepo)) {
+          continue;
+        }
+
         switch (hostVersionEntity.getState()) {
           case INSTALLED:
           case NOT_REQUIRED:

+ 1 - 0
ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java

@@ -65,6 +65,7 @@ public class ActionMetadata {
     defaultHostComponentCommands.add("CONFIGURE");
     defaultHostComponentCommands.add("CONFIGURE_FUNCTION");
     defaultHostComponentCommands.add("DISABLE_SECURITY");
+    defaultHostComponentCommands.add("RECONFIGURE");
   }
 
   private void fillServiceClients() {

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreateKeytabFilesServerAction.java

@@ -217,7 +217,7 @@ public class CreateKeytabFilesServerAction extends KerberosServerAction {
                 return commandReport;
               }
 
-              boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+              boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
               boolean onlyKeytabWrite = "true".equalsIgnoreCase(identityRecord.get(KerberosIdentityDataFileReader.ONLY_KEYTAB_WRITE));
               boolean grabKeytabFromCache = regenerateKeytabs && onlyKeytabWrite;
               // if grabKeytabFromCache=true we will try to get keytab from cache and send to agent, it will be true for

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/CreatePrincipalsServerAction.java

@@ -128,7 +128,7 @@ public class CreatePrincipalsServerAction extends KerberosServerAction {
       seenPrincipals.add(evaluatedPrincipal);
 
       boolean processPrincipal;
-      boolean regenerateKeytabs = "true".equalsIgnoreCase(getCommandParameterValue(getCommandParameters(), REGENERATE_ALL));
+      boolean regenerateKeytabs = getOperationType(getCommandParameters()) == OperationType.RECREATE_ALL;
 
       if (regenerateKeytabs) {
         // do not process cached identities that can be passed as is(headless identities)

+ 57 - 14
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java

@@ -36,6 +36,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -108,31 +109,32 @@ public abstract class KerberosServerAction extends AbstractServerAction {
    */
   public static final String DATA_DIRECTORY_PREFIX = ".ambari_";
 
-  /*
+  /**
    * Kerberos action shared data entry name for the principal-to-password map
    */
   private static final String PRINCIPAL_PASSWORD_MAP = "principal_password_map";
 
-  /*
+  /**
    * Kerberos action shared data entry name for the principal-to-key_number map
    */
   private static final String PRINCIPAL_KEY_NUMBER_MAP = "principal_key_number_map";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
-  */
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand for base64 encoded keytab content
+   */
   public static final String KEYTAB_CONTENT_BASE64 = "keytab_content_base64";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to generate key keytabs
-  * for all principals ("true") or only those that are missing ("false")
-  */
-  public static final String REGENERATE_ALL = "regenerate_all";
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand to indicate what type of creation operation to perform.
+   *
+   * @see OperationType
+   */
+  public static final String OPERATION_TYPE = "operation_type";
 
-  /*
-  * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include Ambari server indetity
-  * ("true") or ignore it ("false")
-  */
+  /**
+   * Key used in kerberosCommandParams in ExecutionCommand to indicate whether to include the Ambari server identity
+   * ("true") or ignore it ("false")
+   */
   public static final String INCLUDE_AMBARI_IDENTITY = "include_ambari_identity";
 
   /**
@@ -218,6 +220,22 @@ public abstract class KerberosServerAction extends AbstractServerAction {
     return getCommandParameterValue(commandParameters, DATA_DIRECTORY);
   }
 
+  /**
+   * Given a (command parameter) Map, attempts to safely retrieve the "operation_type" property.
+   *
+   * @param commandParameters a Map containing the dictionary of data to interrogate
+   * @return the indicated OperationType, or OperationType.DEFAULT if the property is missing or empty
+   */
+  protected static OperationType getOperationType(Map<String, String> commandParameters) {
+    String value = getCommandParameterValue(commandParameters, OPERATION_TYPE);
+    if(StringUtils.isEmpty(value)) {
+      return OperationType.DEFAULT;
+    }
+    else {
+      return OperationType.valueOf(value.toUpperCase());
+    }
+  }
+
   /**
    * Sets the shared principal-to-password Map used to store principals and generated password for
    * use within the current request context.
@@ -569,4 +587,29 @@ public abstract class KerberosServerAction extends AbstractServerAction {
       }
     }
   }
+
+  /**
+   * A Kerberos operation type
+   * <ul>
+   * <li>RECREATE_ALL - regenerate keytabs for all principals</li>
+   * <li>CREATE_MISSING - generate keytabs for only those that are missing</li>
+   * <li>DEFAULT - generate needed keytabs for new components</li>
+   * </ul>
+   */
+  public enum OperationType {
+    /**
+     * Regenerate keytabs for all principals
+     */
+    RECREATE_ALL,
+
+    /**
+     *  Generate keytabs for only those that are missing
+     */
+    CREATE_MISSING,
+
+    /**
+     * Generate needed keytabs for new components
+     */
+    DEFAULT
+  }
 }

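A self-contained sketch of the lookup getOperationType() performs, assuming the "operation_type" key (the OPERATION_TYPE constant above) and hypothetical parameter values:

import java.util.Collections;
import java.util.Map;

class OperationTypeSketch {
  enum OperationType { RECREATE_ALL, CREATE_MISSING, DEFAULT }

  static OperationType getOperationType(Map<String, String> commandParameters) {
    String value = commandParameters.get("operation_type");
    if (value == null || value.isEmpty()) {
      return OperationType.DEFAULT; // missing or empty value falls back to DEFAULT
    }
    return OperationType.valueOf(value.toUpperCase()); // upper-casing makes the match case-insensitive
  }

  public static void main(String[] args) {
    System.out.println(getOperationType(Collections.emptyMap()));                   // DEFAULT
    System.out.println(getOperationType(Map.of("operation_type", "recreate_all"))); // RECREATE_ALL
  }
}
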
+ 1 - 2
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java

@@ -83,8 +83,7 @@ public class PrepareDisableKerberosServerAction extends AbstractPrepareKerberosS
     List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster,
         kerberosDescriptor,
         getServiceComponentFilter(),
-        null, identityFilter,
-      sch -> true);
+        null);
 
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
     Map<String, String> commandParameters = getCommandParameters();

+ 4 - 2
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareEnableKerberosServerAction.java

@@ -92,8 +92,11 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
       }
     }
 
+    KerberosHelper kerberosHelper = getKerberosHelper();
+    Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+    Collection<String> hostFilter = getHostFilter();
     Collection<String> identityFilter = getIdentityFilter();
-    List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+    List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor, serviceComponentFilter, hostFilter);
 
     String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
@@ -107,7 +110,6 @@ public class PrepareEnableKerberosServerAction extends PrepareKerberosIdentities
       actionLog.writeStdOut(String.format("Processing %d components", schCount));
     }
 
-    KerberosHelper kerberosHelper = getKerberosHelper();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
     Set<String> services = cluster.getServices().keySet();

+ 109 - 33
ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareKerberosIdentitiesServerAction.java

@@ -32,8 +32,12 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -67,11 +71,22 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
       throw new AmbariException("Missing cluster object");
     }
 
+    KerberosHelper kerberosHelper = getKerberosHelper();
+
     KerberosDescriptor kerberosDescriptor = getKerberosDescriptor(cluster, false);
+    Map<String, String> commandParameters = getCommandParameters();
+    OperationType operationType = getOperationType(getCommandParameters());
+
+    Map<String, ? extends Collection<String>> serviceComponentFilter = getServiceComponentFilter();
+    Collection<String> hostFilter = getHostFilter();
     Collection<String> identityFilter = getIdentityFilter();
-    List<ServiceComponentHost> schToProcess = getServiceComponentHostsToProcess(cluster, kerberosDescriptor, identityFilter);
+    // If the operationType is DEFAULT, use the getServiceComponentHostsToProcess method to determine
+    // which ServiceComponentHosts to process based on the filters. However, if we are regenerating
+    // keytabs for a specific set of components, build the identity filter below so we can
+    // customize what needs to be done.
+    List<ServiceComponentHost> schToProcess = kerberosHelper.getServiceComponentHostsToProcess(cluster, kerberosDescriptor,
+        (operationType == OperationType.DEFAULT) ? serviceComponentFilter : null, hostFilter);
 
-    Map<String, String> commandParameters = getCommandParameters();
     String dataDirectory = getCommandParameterValue(commandParameters, DATA_DIRECTORY);
     Map<String, Map<String, String>> kerberosConfigurations = new HashMap<>();
 
@@ -84,18 +99,32 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
       actionLog.writeStdOut(String.format("Processing %d components", schCount));
     }
 
-    KerberosHelper kerberosHelper = getKerberosHelper();
     Set<String> services = cluster.getServices().keySet();
     Map<String, Set<String>> propertiesToRemove = new HashMap<>();
     Map<String, Set<String>> propertiesToIgnore = new HashMap<>();
     boolean includeAmbariIdentity = "true".equalsIgnoreCase(getCommandParameterValue(commandParameters, KerberosServerAction.INCLUDE_AMBARI_IDENTITY));
 
+    // If a host filter is set, do not include the Ambari server identity.
+    includeAmbariIdentity &= (hostFilter == null);
+
+    if (serviceComponentFilter != null) {
+      // When a service/component filter is set, only include the Ambari server identity if the
+      // filter contains the AMBARI/AMBARI_SERVER component.
+      includeAmbariIdentity &= (serviceComponentFilter.get("AMBARI") != null) && serviceComponentFilter.get("AMBARI").contains("AMBARI_SERVER");
+
+      if (operationType != OperationType.DEFAULT) {
+        // Update the identity filter, if necessary
+        identityFilter = updateIdentityFilter(kerberosDescriptor, identityFilter, serviceComponentFilter);
+      }
+    }
+
     // Calculate the current host-specific configurations. These will be used to replace
     // variables within the Kerberos descriptor data
     Map<String, Map<String, String>> configurations = kerberosHelper.calculateConfigurations(cluster, null, kerberosDescriptor, false, false);
 
     processServiceComponentHosts(cluster, kerberosDescriptor, schToProcess, identityFilter, dataDirectory,
-        configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore, !CollectionUtils.isEmpty(getHostFilter()));
+        configurations, kerberosConfigurations, includeAmbariIdentity, propertiesToIgnore,
+        hostFilter != null);
 
     kerberosHelper.applyStackAdvisorUpdates(cluster, services, configurations, kerberosConfigurations,
         propertiesToIgnore, propertiesToRemove, true);
@@ -118,35 +147,6 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
     throw new UnsupportedOperationException();
   }
 
-  /**
-   * Calls {@link KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)}
-   * with no filter on ServiceComponentHosts
-   * <p/>
-   * The <code>shouldProcessCommand</code> implementation passed to KerberosHelper#getServiceComponentHostsToProcess
-   * always returns true, indicating to process all ServiceComponentHosts.
-   *
-   * @param cluster            the cluster
-   * @param kerberosDescriptor the current Kerberos descriptor
-   * @param identityFilter     a list of identities to include, or all if null  @return the list of ServiceComponentHosts to process
-   * @throws AmbariException
-   * @see KerberosHelper#getServiceComponentHostsToProcess(Cluster, KerberosDescriptor, Map, Collection, Collection, KerberosHelper.Command)
-   */
-  protected List<ServiceComponentHost> getServiceComponentHostsToProcess(Cluster cluster,
-                                                                         KerberosDescriptor kerberosDescriptor,
-                                                                         Collection<String> identityFilter)
-      throws AmbariException {
-    return getKerberosHelper().getServiceComponentHostsToProcess(cluster,
-        kerberosDescriptor,
-        getServiceComponentFilter(),
-        getHostFilter(), identityFilter,
-        new KerberosHelper.Command<Boolean, ServiceComponentHost>() {
-          @Override
-          public Boolean invoke(ServiceComponentHost sch) throws AmbariException {
-            return true;
-          }
-        });
-  }
-
   /**
    * Calls {@link KerberosHelper#getKerberosDescriptor(Cluster, boolean)}
    *
@@ -200,5 +200,81 @@ public class PrepareKerberosIdentitiesServerAction extends AbstractPrepareKerber
           calculatedConfiguration, kerberosConfigurations, includePreconfiguredData);
     }
   }
+
+  /**
+   * Iterate through the identities in the Kerberos descriptor to find the relevant identities to
+   * add to the identity filter.
+   * <p>
+   * The set of identities to include in the filter are determined by whether they are explicit
+   * identities set in a component or service in the supplied service/component filter.
+   *
+   * @param kerberosDescriptor     the Kerberos descriptor
+   * @param identityFilter         the existing identity filter
+   * @param serviceComponentFilter the service/component filter
+   * @return a new collection of paths (including any existing paths) to act as the updated identity filter
+   */
+  private Collection<String> updateIdentityFilter(KerberosDescriptor kerberosDescriptor,
+                                                  Collection<String> identityFilter,
+                                                  Map<String, ? extends Collection<String>> serviceComponentFilter) {
+
+    Set<String> updatedFilter = (identityFilter == null) ? new HashSet<>() : new HashSet<>(identityFilter);
+
+    Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+
+    if (serviceDescriptors != null) {
+      for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
+        String serviceName = serviceDescriptor.getName();
+
+        if (serviceComponentFilter.containsKey("*") || serviceComponentFilter.containsKey(serviceName)) {
+          Collection<String> componentFilter = serviceComponentFilter.get(serviceName);
+          boolean anyComponent = ((componentFilter == null) || componentFilter.contains("*"));
+
+          // Only include the service-wide identities if the component filter is null or contains "*",
+          // which indicates that all components for the given service are to be processed.
+          if (anyComponent) {
+            addIdentitiesToFilter(serviceDescriptor.getIdentities(), updatedFilter, true);
+          }
+
+          Map<String, KerberosComponentDescriptor> componentDescriptors = serviceDescriptor.getComponents();
+          if (componentDescriptors != null) {
+            for (KerberosComponentDescriptor componentDescriptor : componentDescriptors.values()) {
+              String componentName = componentDescriptor.getName();
+              if (anyComponent || (componentFilter.contains(componentName))) {
+                addIdentitiesToFilter(componentDescriptor.getIdentities(), updatedFilter, true);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    return updatedFilter;
+  }
+
+  /**
+   * Add the path of each identity in the collection of identities to the supplied identity filter
+   * if that identity is not a reference to another identity or if references are allowed.
+   * @param identityDescriptors the collection of identity descriptors to process
+   * @param identityFilter      the identity filter to modify
+   * @param skipReferences      true to skip identities that are references to other identities; false to include them
+   */
+  private void addIdentitiesToFilter(List<KerberosIdentityDescriptor> identityDescriptors,
+                                     Collection<String> identityFilter, boolean skipReferences) {
+    if (!CollectionUtils.isEmpty(identityDescriptors)) {
+      for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+        if (!skipReferences || !identityDescriptor.isReference()) {
+          String identityPath = identityDescriptor.getPath();
+
+          if (!StringUtils.isEmpty(identityPath)) {
+            identityFilter.add(identityPath);
+
+            // Find and add the references TO this identity to ensure the new/updated keytab file is
+            // sent to the appropriate host(s)
+            addIdentitiesToFilter(identityDescriptor.findReferences(), identityFilter, false);
+          }
+        }
+      }
+    }
+  }
 }
 

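The wildcard semantics used by updateIdentityFilter() can be restated in isolation; the two predicates below mirror the containsKey("*") and contains("*") checks above, and the HDFS/NAMENODE names are only examples:

import java.util.Collection;
import java.util.List;
import java.util.Map;

class FilterMatchSketch {
  // A "*" service key matches every service in the Kerberos descriptor.
  static boolean serviceMatches(Map<String, ? extends Collection<String>> filter, String service) {
    return filter.containsKey("*") || filter.containsKey(service);
  }

  // A null or "*" component list matches the service-wide identities and every component.
  static boolean anyComponent(Collection<String> componentFilter) {
    return componentFilter == null || componentFilter.contains("*");
  }

  public static void main(String[] args) {
    Map<String, Collection<String>> filter = Map.of("HDFS", List.of("NAMENODE"));
    System.out.println(serviceMatches(filter, "HDFS"));   // true
    System.out.println(anyComponent(filter.get("HDFS"))); // false: only NAMENODE identities are added
  }
}
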
+ 54 - 3
ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java

@@ -27,6 +27,8 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -35,6 +37,8 @@ import org.apache.ambari.server.utils.VersionUtils;
  */
 public class ExtensionHelper {
 
+  private final static Logger LOG = LoggerFactory.getLogger(ExtensionHelper.class);
+
   public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateNotRequiredExtension(stack, extension);
     validateServicesNotInstalled(clusters, stack, extension);
@@ -62,9 +66,9 @@ public class ExtensionHelper {
     }
   }
 
-  public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  public static void validateCreateLink(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateSupportedStackVersion(stack, extension);
-    validateServiceDuplication(stack, extension);
+    validateServiceDuplication(stackManager, stack, extension);
     validateRequiredExtensions(stack, extension);
   }
 
@@ -88,15 +92,24 @@ public class ExtensionHelper {
     throw new AmbariException(message);
   }
 
-  private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    LOG.debug("Looking for duplicate services");
     for (ServiceInfo service : extension.getServices()) {
+      LOG.debug("Looking for duplicate service " + service.getName());
       if (service != null) {
         ServiceInfo stackService = null;
         try {
           stackService = stack.getService(service.getName());
+          if (stackService != null) {
+            LOG.debug("Found service " + service.getName());
+            if (isInheritedExtensionService(stackManager, stack, service.getName(), extension.getName())) {
+              stackService = null;
+            }
+          }
         }
         catch (Exception e) {
           //Eat the exception
+          LOG.error("Error validating service duplication", e);
         }
         if (stackService != null) {
           String message = "Existing service is included in extension"
@@ -112,6 +125,44 @@ public class ExtensionHelper {
     }
   }
 
+  private static boolean isInheritedExtensionService(StackManager stackManager, StackInfo stack, String serviceName, String extensionName) {
+    // Check if service is from an extension at the current stack level, if so then it isn't inherited from its parent stack version
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Service is at requested stack/version level " + serviceName);
+      return false;
+    }
+
+    return isExtensionService(stackManager, stack.getName(), stack.getParentStackVersion(), serviceName, extensionName);
+  }
+
+  private static boolean isExtensionService(StackManager stackManager, String stackName, String stackVersion, String serviceName, String extensionName) {
+    LOG.debug("Checking at stack/version " + stackName + "/" + stackVersion);
+    StackInfo stack = stackManager.getStack(stackName, stackVersion);
+
+    if (stack == null) {
+      LOG.warn("Stack/version not found " + stackName + "/" + stackVersion);
+      return false;
+    }
+
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Stack/version " + stackName + "/" + stackVersion + " contains service " + serviceName);
+      return true;
+    }
+    else {
+      return isExtensionService(stackManager, stackName, stack.getParentStackVersion(), serviceName, extensionName);
+    }
+  }
+
+  private static boolean isExtensionService(StackInfo stack, String serviceName, String extensionName) {
+    ExtensionInfo extension = stack.getExtension(extensionName);
+    if (extension == null) {
+      LOG.debug("Extension not found " + extensionName);
+      return false;
+    }
+
+    return extension.getService(serviceName) != null;
+  }
+
   private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
     for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
       if (requiredExtension != null) {

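The duplicate-service check now walks parent stack versions recursively. A skeletal model of that traversal, with StackManager/StackInfo reduced to a toy lookup table and the versions and service name purely illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class ParentStackWalkSketch {
  // Minimal stand-in for StackInfo: its parent version and the services its extension declares.
  static class Stack {
    final String parentVersion;
    final Set<String> extensionServices;
    Stack(String parentVersion, Set<String> extensionServices) {
      this.parentVersion = parentVersion;
      this.extensionServices = extensionServices;
    }
  }

  // Walk up the parent chain until the extension declares the service or the chain ends.
  static boolean isExtensionService(Map<String, Stack> stacksByVersion, String version, String service) {
    if (version == null) {
      return false; // reached the top of the chain: the service is not inherited
    }
    Stack stack = stacksByVersion.get(version);
    if (stack == null) {
      return false; // unknown stack version
    }
    if (stack.extensionServices.contains(service)) {
      return true;
    }
    return isExtensionService(stacksByVersion, stack.parentVersion, service);
  }

  public static void main(String[] args) {
    Map<String, Stack> stacks = new HashMap<>();
    stacks.put("2.6", new Stack("2.5", Set.of()));
    stacks.put("2.5", new Stack(null, Set.of("MICROSOFT_R")));
    System.out.println(isExtensionService(stacks, "2.6", "MICROSOFT_R")); // true: inherited from 2.5
  }
}
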
+ 32 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java

@@ -38,6 +38,7 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RefreshCommand;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
@@ -197,6 +198,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     if (parentVersion != null) {
       mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
     }
+
     for (ExtensionInfo extension : stackInfo.getExtensions()) {
       String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
       ExtensionModule extensionModule = extensions.get(extensionKey);
@@ -403,6 +405,9 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
+      for (Map.Entry<String, ServiceModule> entry : extension.getServiceModules().entrySet()) {
+        serviceModules.put(entry.getKey(), entry.getValue());
+      }
       stackInfo.addExtension(extension.getModuleInfo());
     }
   }
@@ -581,6 +586,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       }
       // Read the service and available configs for this stack
       populateServices();
+
       if (!stackInfo.isValid()) {
         setValid(false);
         addErrors(stackInfo.getErrors());
@@ -629,7 +635,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     for (ServiceInfo serviceInfo : serviceInfos) {
       ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory);
       serviceModules.add(serviceModule);
-      if (!serviceModule.isValid()){
+      if (!serviceModule.isValid()) {
         stackInfo.setValid(false);
         setValid(false);
         stackInfo.addErrors(serviceModule.getErrors());
@@ -771,7 +777,11 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     // relationship into map. Since we do not have the reverse {@link PropertyInfo},
     // we have to loop through service-configs again later.
     for (ServiceModule serviceModule : serviceModules.values()) {
+
+      Map<String, Map<String, String>> componentRefreshCommandsMap = new HashMap();
+
       for (PropertyInfo pi : serviceModule.getModuleInfo().getProperties()) {
+
         for (PropertyDependencyInfo pdi : pi.getDependsOnProperties()) {
           String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
           String name = pi.getName();
@@ -786,7 +796,28 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
             dependedByMap.put(pdi, newDependenciesSet);
           }
         }
+
+        // set refresh commands
+        if (pi.getSupportedRefreshCommands() != null && pi.getSupportedRefreshCommands().size() > 0) {
+          String type = ConfigHelper.fileNameToConfigType(pi.getFilename());
+          String propertyName = type + "/" + pi.getName();
+
+          Map<String, String> refreshCommandPropertyMap = componentRefreshCommandsMap.get(propertyName);
+
+          for (RefreshCommand refreshCommand : pi.getSupportedRefreshCommands()) {
+            String componentName = refreshCommand.getComponentName();
+            if (refreshCommandPropertyMap == null) {
+              refreshCommandPropertyMap = new HashMap<>();
+              componentRefreshCommandsMap.put(propertyName, refreshCommandPropertyMap);
+            }
+            refreshCommandPropertyMap.put(componentName, refreshCommand.getCommand());
+          }
+
+        }
+
       }
+
+      stackInfo.getRefreshCommandConfiguration().addRefreshCommands(componentRefreshCommandsMap);
     }
 
     // Go through all service-configs again and set their 'depended-by' if necessary.

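The nested map assembled above keys each property as "config-type/property-name" and maps component names to their refresh command. A small illustration of the resulting shape, with made-up property and component names:

import java.util.HashMap;
import java.util.Map;

class RefreshCommandMapSketch {
  public static void main(String[] args) {
    // "config-type/property-name" -> (component name -> refresh command)
    Map<String, Map<String, String>> componentRefreshCommands = new HashMap<>();
    componentRefreshCommands.put("hdfs-site/dfs.namenode.handler.count",
        Map.of("NAMENODE", "reload_configs"));
    System.out.println(componentRefreshCommands);
  }
}
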
+ 156 - 8
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java

@@ -17,12 +17,15 @@
  */
 package org.apache.ambari.server.state;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -41,6 +44,7 @@ import org.apache.ambari.server.state.PropertyInfo.PropertyType;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.SecretReference;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -72,6 +76,8 @@ public class ConfigHelper {
    */
   private final Cache<Integer, Boolean> staleConfigsCache;
 
+  private final Cache<Integer, String> refreshConfigCommandCache;
+
   private static final Logger LOG =
       LoggerFactory.getLogger(ConfigHelper.class);
 
@@ -113,6 +119,9 @@ public class ConfigHelper {
     STALE_CONFIGS_CACHE_EXPIRATION_TIME = configuration.staleConfigCacheExpiration();
     staleConfigsCache = CacheBuilder.newBuilder().
         expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
+
+    refreshConfigCommandCache = CacheBuilder.newBuilder().
+            expireAfterWrite(STALE_CONFIGS_CACHE_EXPIRATION_TIME, TimeUnit.SECONDS).build();
   }
 
   /**
@@ -1302,6 +1311,8 @@ public class ConfigHelper {
 
     StackId stackId = sch.getServiceComponent().getDesiredStackId();
 
+    StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
     ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
             stackId.getStackVersion(), sch.getServiceName());
 
@@ -1316,8 +1327,10 @@ public class ConfigHelper {
     // ---- merge values, determine changed keys, check stack: stale
 
     Iterator<Entry<String, Map<String, String>>> it = desired.entrySet().iterator();
+    List<String> changedProperties = new LinkedList<>();
 
-    while (it.hasNext() && !stale) {
+    while (it.hasNext()) {
+      boolean staleEntry = false;
       Entry<String, Map<String, String>> desiredEntry = it.next();
 
       String type = desiredEntry.getKey();
@@ -1325,29 +1338,108 @@ public class ConfigHelper {
 
       if (!actual.containsKey(type)) {
         // desired is set, but actual is not
-        if (!serviceInfo.hasConfigDependency(type)) {
-          stale = componentInfo != null && componentInfo.hasConfigType(type);
-        } else {
-          stale = true;
-        }
+        staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
       } else {
         // desired and actual both define the type
         HostConfig hc = actual.get(type);
         Map<String, String> actualTags = buildTags(hc);
 
         if (!isTagChanged(tags, actualTags, hasGroupSpecificConfigsForType(cluster, sch.getHostName(), type))) {
-          stale = false;
+          staleEntry = false;
         } else {
-          stale = serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type);
+          staleEntry = (serviceInfo.hasConfigDependency(type) || componentInfo.hasConfigType(type));
+          if (staleEntry) {
+            Collection<String> changedKeys = findChangedKeys(cluster, type, tags.values(), actualTags.values());
+            changedProperties.addAll(changedKeys);
+          }
         }
       }
+      stale = stale | staleEntry;
     }
+    
+    String refreshCommand = calculateRefreshCommand(stackInfo.getRefreshCommandConfiguration(), sch, changedProperties);
+
     if (STALE_CONFIGS_CACHE_ENABLED) {
       staleConfigsCache.put(staleHash, stale);
+      if (refreshCommand != null) {
+        refreshConfigCommandCache.put(staleHash, refreshCommand);
+      }
     }
+
+    // gather all changed properties and see if we can find a common refreshConfigs command for this component
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Changed properties {} ({}) {} :  COMMAND: {}", stale, sch.getServiceComponentName(), sch.getHostName(), refreshCommand);
+      for (String p : changedProperties) {
+        LOG.debug(p);
+      }
+    }
+
     return stale;
   }
 
+  public String getRefreshConfigsCommand(Cluster cluster, String hostName, String serviceName, String componentName) throws AmbariException {
+    ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(componentName);
+    ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+    return getRefreshConfigsCommand(cluster, sch);
+  }
+
+  public String getRefreshConfigsCommand(Cluster cluster, ServiceComponentHost sch) throws AmbariException {
+    String refreshCommand = null;
+
+    Map<String, HostConfig> actual = sch.getActualConfigs();
+    if (STALE_CONFIGS_CACHE_ENABLED) {
+      Map<String, Map<String, String>> desired = getEffectiveDesiredTags(cluster, sch.getHostName(),
+              cluster.getDesiredConfigs());
+      int staleHash = Objects.hashCode(actual.hashCode(),
+              desired.hashCode(),
+              sch.getHostName(),
+              sch.getServiceComponentName(),
+              sch.getServiceName());
+      refreshCommand = refreshConfigCommandCache.getIfPresent(staleHash);
+    }
+    return refreshCommand;
+  }
+
+
+  /**
+   * Calculates the refresh command for a set of changed properties as follows:
+   *  - if any property has no refresh command, return null
+   *  - in case of multiple refresh commands: since REFRESH_CONFIGS is implied by every other command,
+   *    it can be overridden by RELOAD_CONFIGS or any other custom command; however, if two different
+   *    non-REFRESH_CONFIGS commands appear, return null, as it is not possible to refresh all
+   *    properties with one command.
+   *
+   *  examples:
+   *     {REFRESH_CONFIGS, REFRESH_CONFIGS, RELOAD_CONFIGS} ==> RELOAD_CONFIGS
+   *     {REFRESH_CONFIGS, RELOADPROXYUSERS, RELOAD_CONFIGS} ==> null
+   *
+   * @param refreshCommandConfiguration the stack's refresh command configuration
+   * @param sch                         the host component whose configuration changed
+   * @param changedProperties           the changed properties, as "config-type/property-name" paths
+   * @return the common refresh command, or null if no single command covers all changed properties
+   */
+  private String calculateRefreshCommand(RefreshCommandConfiguration refreshCommandConfiguration,
+                                         ServiceComponentHost sch, List<String> changedProperties) {
+
+    String finalRefreshCommand = null;
+    for (String propertyName : changedProperties) {
+      String refreshCommand = refreshCommandConfiguration.getRefreshCommandForComponent(sch, propertyName);
+      if (refreshCommand == null) {
+        return null;
+      }
+      if (finalRefreshCommand == null) {
+        finalRefreshCommand = refreshCommand;
+      }
+      if (!finalRefreshCommand.equals(refreshCommand)) {
+        if (finalRefreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+          finalRefreshCommand = refreshCommand;
+        } else if (!refreshCommand.equals(RefreshCommandConfiguration.REFRESH_CONFIGS)) {
+          return null;
+        }
+      }
+    }
+    return finalRefreshCommand;
+  }
+
   /**
    * Determines if the hostname has group specific configs for the type specified
    *
@@ -1373,6 +1465,62 @@ public class ConfigHelper {
     return false;
   }
 
+  /**
+   * @return the keys that have changed values
+   */
+  private Collection<String> findChangedKeys(Cluster cluster, String type,
+                                             Collection<String> desiredTags, Collection<String> actualTags) {
+
+    Map<String, String> desiredValues = new HashMap<>();
+    Map<String, String> actualValues = new HashMap<>();
+
+    for (String tag : desiredTags) {
+      Config config = cluster.getConfig(type, tag);
+      if (null != config) {
+        desiredValues.putAll(config.getProperties());
+      }
+    }
+
+    for (String tag : actualTags) {
+      Config config = cluster.getConfig(type, tag);
+      if (null != config) {
+        actualValues.putAll(config.getProperties());
+      }
+    }
+
+    List<String> keys = new ArrayList<>();
+
+    for (Entry<String, String> entry : desiredValues.entrySet()) {
+      String key = entry.getKey();
+      String value = entry.getValue();
+
+      if (!actualValues.containsKey(key) || !valuesAreEqual(actualValues.get(key), value)) {
+        keys.add(type + "/" + key);
+      }
+    }
+
+    return keys;
+  }
+
+  /**
+   * Compares two values, treating them as doubles when both parse as numbers.
+   * @param actualValue the currently applied value
+   * @param newValue    the new desired value
+   * @return true if the values are equal, numerically or as strings
+   */
+  private boolean valuesAreEqual(String actualValue, String newValue) {
+    boolean actualValueIsNumber = NumberUtils.isNumber(actualValue);
+    boolean newValueIsNumber = NumberUtils.isNumber(newValue);
+    if (actualValueIsNumber && newValueIsNumber) {
+      Double ab = Double.parseDouble(actualValue);
+      Double bb = Double.parseDouble(newValue);
+      return ab.equals(bb);
+    } else if (!actualValueIsNumber && !newValueIsNumber) {
+      return actualValue.equals(newValue);
+    }
+    return false;
+  }
+
   /**
    * @return the map of tags for a desired config
    */

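The reduction rule documented on calculateRefreshCommand() amounts to a fold over the per-property commands. A standalone sketch reproducing the two javadoc examples, with the command names written as plain string literals:

import java.util.Arrays;
import java.util.List;

class RefreshCommandReductionSketch {
  static final String REFRESH_CONFIGS = "refresh_configs";

  static String reduce(List<String> perPropertyCommands) {
    String result = null;
    for (String command : perPropertyCommands) {
      if (command == null) {
        return null; // some property has no refresh command at all
      }
      if (result == null || result.equals(REFRESH_CONFIGS)) {
        result = command; // REFRESH_CONFIGS may be overridden by a stronger command
      } else if (!command.equals(REFRESH_CONFIGS) && !command.equals(result)) {
        return null; // two different non-refresh commands cannot be merged
      }
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(reduce(Arrays.asList(REFRESH_CONFIGS, REFRESH_CONFIGS, "reload_configs")));
    // -> reload_configs
    System.out.println(reduce(Arrays.asList(REFRESH_CONFIGS, "reloadproxyusers", "reload_configs")));
    // -> null
  }
}
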
+ 29 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java

@@ -90,6 +90,11 @@ public class PropertyInfo {
   private Set<PropertyDependencyInfo> usedByProperties =
           new HashSet<>();
 
+  @XmlElementWrapper(name="supported-refresh-commands")
+  @XmlElement(name="refresh-command")
+  private Set<RefreshCommand> supportedRefreshCommands = new HashSet<>();
+
+
   //This method is called after all the properties (except IDREF) are unmarshalled for this object,
   //but before this object is set to the parent object.
   void afterUnmarshal(Unmarshaller unmarshaller, Object parent) {
@@ -209,6 +214,30 @@ public class PropertyInfo {
     this.requireInput = requireInput;
   }
 
+  public List<Element> getPropertyAttributes() {
+    return propertyAttributes;
+  }
+
+  public void setPropertyAttributes(List<Element> propertyAttributes) {
+    this.propertyAttributes = propertyAttributes;
+  }
+
+  public Set<RefreshCommand> getSupportedRefreshCommands() {
+    return supportedRefreshCommands;
+  }
+
+  public void setSupportedRefreshCommands(Set<RefreshCommand> supportedRefreshCommands) {
+    this.supportedRefreshCommands = supportedRefreshCommands;
+  }
+
+  /**
+   * Wildcard properties should not be included in stack configurations.
+   * @return true if this property's name contains no wildcard and it should be configured
+   */
+  public boolean shouldBeConfigured() {
+    return !getName().contains("*");
+  }
+
   @Override
   public int hashCode() {
     final int prime = 31;

+ 52 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommand.java

@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import javax.xml.bind.annotation.XmlAttribute;
+
+/**
+ * Represents a RefreshCommand defined for a component and a property.
+ */
+public class RefreshCommand {
+
+  @XmlAttribute(name="componentName", required = true)
+  private String componentName;
+
+  /**
+   * Default command is reload_configs.
+   */
+  @XmlAttribute(name="command", required = false)
+  private String command = RefreshCommandConfiguration.RELOAD_CONFIGS;
+
+  public RefreshCommand() {
+  }
+
+  public RefreshCommand(String componentName, String command) {
+    this.componentName = componentName;
+    this.command = command;
+  }
+
+  public String getComponentName() {
+    return componentName;
+  }
+
+  public String getCommand() {
+    return command;
+  }
+
+}

+ 71 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/RefreshCommandConfiguration.java

@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class RefreshCommandConfiguration {
+
+  public static final String RELOAD_CONFIGS = "reload_configs";
+  public static final String REFRESH_CONFIGS = "refresh_configs";
+
+  private Map<String, Map<String, String>> propertyComponentCommandMap;
+
+  public RefreshCommandConfiguration() {
+  }
+
+  private String findKey(String propertyName) {
+    for (String keyName : propertyComponentCommandMap.keySet()) {
+      if (propertyName.startsWith(keyName)) {
+        return keyName;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * If no command is defined for a component, the default command is REFRESH_CONFIGS when the component
+   * is a client, or when there is exactly one command defined for another component. This is because if
+   * RELOAD_CONFIGS is defined for NAMENODE, then presumably other dependent components need just a refresh.
+   */
+  public String getRefreshCommandForComponent(ServiceComponentHost sch, String propertyName) {
+    if (sch.isClientComponent()) {
+      return REFRESH_CONFIGS;
+    }
+    String keyName = findKey(propertyName);
+    Map<String, String> componentCommandMap = propertyComponentCommandMap.get(keyName);
+    if (componentCommandMap != null) {
+      String commandForComponent = componentCommandMap.get(sch.getServiceComponentName());
+      if (commandForComponent != null) {
+        return commandForComponent;
+      } else if(componentCommandMap.size() == 1) {
+        return REFRESH_CONFIGS;
+      }
+    }
+    return null;
+  }
+
+  public void addRefreshCommands(Map<String, Map<String, String>> refreshCommands) {
+    if (propertyComponentCommandMap == null) {
+      propertyComponentCommandMap = new HashMap<>();
+    }
+    propertyComponentCommandMap.putAll(refreshCommands);
+  }
+
+}

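A usage sketch for the new class, assuming it is on the classpath; the property path and component names are illustrative, and the commented lookups stand in for calls with real ServiceComponentHost instances:

import java.util.Map;

import org.apache.ambari.server.state.RefreshCommandConfiguration;

class RefreshCommandConfigSketch {
  public static void main(String[] args) {
    RefreshCommandConfiguration config = new RefreshCommandConfiguration();
    config.addRefreshCommands(Map.of(
        "hdfs-site/dfs.namenode.handler.count",
        Map.of("NAMENODE", RefreshCommandConfiguration.RELOAD_CONFIGS)));

    // With ServiceComponentHost instances (not constructed here):
    // config.getRefreshCommandForComponent(namenodeSch, "hdfs-site/dfs.namenode.handler.count")
    //   -> "reload_configs" (explicitly mapped)
    // config.getRefreshCommandForComponent(datanodeSch, "hdfs-site/dfs.namenode.handler.count")
    //   -> "refresh_configs" (only one component is mapped, so dependents just refresh)
  }
}
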
+ 10 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java

@@ -90,6 +90,8 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   * */
   private List<String> servicesWithNoConfigs = new ArrayList<>();
 
+  private RefreshCommandConfiguration refreshCommandConfiguration = new RefreshCommandConfiguration();
+
   public String getMinJdk() {
     return minJdk;
   }
@@ -605,4 +607,12 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   public VersionDefinitionXml getLatestVersionDefinition() {
     return latestVersion;
   }
+
+  public RefreshCommandConfiguration getRefreshCommandConfiguration() {
+    return refreshCommandConfiguration;
+  }
+
+  public void setRefreshCommandConfiguration(RefreshCommandConfiguration refreshCommandConfiguration) {
+    this.refreshCommandConfiguration = refreshCommandConfiguration;
+  }
 }

+ 18 - 6
ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java

@@ -304,8 +304,8 @@ public class UpgradeContext {
         throw new AmbariException(
             String.format("There are no upgrades for cluster %s which are marked as revertable",
                 cluster.getClusterName()));
-      }      
-      
+      }
+
       if (!revertUpgrade.getOrchestration().isRevertable()) {
         throw new AmbariException(String.format("The %s repository type is not revertable",
             revertUpgrade.getOrchestration()));
@@ -323,14 +323,26 @@ public class UpgradeContext {
             revertableUpgrade.getRepositoryVersion().getVersion()));
       }
 
+      // !!! build all service-specific reversions
       Set<RepositoryVersionEntity> priors = new HashSet<>();
+      Map<String, Service> clusterServices = cluster.getServices();
       for (UpgradeHistoryEntity history : revertUpgrade.getHistory()) {
+        String serviceName = history.getServiceName();
+        String componentName = history.getComponentName();
+
         priors.add(history.getFromReposistoryVersion());
 
-        // !!! build all service-specific
-        m_services.add(history.getServiceName());
-        m_sourceRepositoryMap.put(history.getServiceName(), history.getTargetRepositoryVersion());
-        m_targetRepositoryMap.put(history.getServiceName(), history.getFromReposistoryVersion());
+        // if the service is no longer installed, do nothing
+        if (!clusterServices.containsKey(serviceName)) {
+          LOG.warn("{}/{} will not be reverted since it is no longer installed in the cluster",
+              serviceName, componentName);
+
+          continue;
+        }
+
+        m_services.add(serviceName);
+        m_sourceRepositoryMap.put(serviceName, history.getTargetRepositoryVersion());
+        m_targetRepositoryMap.put(serviceName, history.getFromReposistoryVersion());
       }
 
       if (priors.size() != 1) {

+ 25 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java

@@ -258,6 +258,31 @@ public abstract class AbstractKerberosDescriptor {
     }
   }
 
+  /**
+   * Calculate the path to this identity descriptor for logging purposes.
+   * Examples:
+   * <ul>
+   * <li>/</li>
+   * <li>/SERVICE</li>
+   * <li>/SERVICE/COMPONENT</li>
+   * <li>/SERVICE/COMPONENT/identity_name</li>
+   * </ul>
+   *
+   * @return a path
+   */
+  public String getPath() {
+    StringBuilder path = new StringBuilder();
+    AbstractKerberosDescriptor current = this;
+    while (current != null && (current.getName() != null)) {
+      path.insert(0, current.getName());
+      path.insert(0, '/');
+      current = current.getParent();
+    }
+
+    return path.toString();
+  }
+
   /**
    * An enumeration of the different Kerberos (sub)descriptors for internal use.
    */

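The parent-walk in getPath() can be exercised with a toy node type standing in for AbstractKerberosDescriptor; the names here are examples only:

class PathWalkSketch {
  static class Node {
    final Node parent;
    final String name; // null for the root descriptor

    Node(Node parent, String name) {
      this.parent = parent;
      this.name = name;
    }

    // Walk parent links, prefixing "/name" segments until the root is reached.
    String getPath() {
      StringBuilder path = new StringBuilder();
      for (Node current = this; current != null && current.name != null; current = current.parent) {
        path.insert(0, current.name).insert(0, '/');
      }
      return path.toString();
    }
  }

  public static void main(String[] args) {
    Node root = new Node(null, null);
    Node service = new Node(root, "HDFS");
    Node component = new Node(service, "NAMENODE");
    System.out.println(component.getPath()); // -> /HDFS/NAMENODE
  }
}
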
+ 4 - 14
ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptorContainer.java

@@ -862,22 +862,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
           referencedIdentity = getReferencedIdentityDescriptor(identity.getName());
 
           if(referencedIdentity != null) {
-            // Calculate the path to this identity descriptor for logging purposes.
-            // Examples:
-            //   /
-            //   /SERVICE
-            //   /SERVICE/COMPONENT
-            StringBuilder path = new StringBuilder();
-            AbstractKerberosDescriptor parent = identity.getParent();
-            while(parent != null && (parent.getName() != null)) {
-              path.insert(0, parent.getName());
-              path.insert(0, '/');
-              parent = parent.getParent();
-            }
-
             // Log this since it is deprecated...
             LOG.warn("Referenced identities should be declared using the identity's \"reference\" attribute, not the identity's \"name\" attribute." +
-                " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", path, identity.getName());
+                " This is a deprecated feature. Problems may occur in the future unless this is corrected: {}:{}", identity.getPath(), identity.getName());
           }
         }
       } catch (AmbariException e) {
@@ -896,6 +883,9 @@ public abstract class AbstractKerberosDescriptorContainer extends AbstractKerber
       } else {
         dereferencedIdentity = new KerberosIdentityDescriptor(identity.toMap());
       }
+
+      // Force the path for this identity descriptor to be the same as the original identity descriptor's.
+      dereferencedIdentity.setPath(identity.getPath());
     }
 
     return dereferencedIdentity;

+ 160 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java

@@ -17,10 +17,15 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.base.Optional;
 
@@ -94,6 +99,8 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
    */
   private Predicate when = null;
 
+  private String path = null;
+
   /**
    * Creates a new KerberosIdentityDescriptor
    *
@@ -156,6 +163,47 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     return reference;
   }
 
+  /**
+   * Gets the absolute path to the referenced Kerberos identity definition
+   *
+   * @return the path to the referenced Kerberos identity definition or <code>null</code> if not set
+   */
+  public String getReferenceAbsolutePath() {
+    String absolutePath;
+    if(StringUtils.isEmpty(reference)) {
+      absolutePath = getName();
+    }
+    else {
+      absolutePath = reference;
+    }
+
+    if(!StringUtils.isEmpty(absolutePath) && !absolutePath.startsWith("/")) {
+      String path = getPath();
+      if(path == null) {
+        path = "";
+      }
+
+      if(absolutePath.startsWith("..")) {
+        AbstractKerberosDescriptor parent = getParent();
+        if(parent != null) {
+          parent = parent.getParent();
+
+          if(parent != null) {
+            absolutePath = absolutePath.replace("..", parent.getPath());
+          }
+        }
+      }
+      else if(absolutePath.startsWith(".")) {
+        AbstractKerberosDescriptor parent = getParent();
+        if (parent != null) {
+          absolutePath = absolutePath.replace(".", parent.getPath());
+        }
+      }
+    }
+
+    return absolutePath;
+  }
+
   /**
    * Sets the path to the referenced Kerberos identity definition
    *
@@ -356,6 +404,59 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+  /**
+   * Determines whether this {@link KerberosIdentityDescriptor} indicates it is a reference to some
+   * other {@link KerberosIdentityDescriptor}.
+   * <p>
+   * A KerberosIdentityDescriptor is a reference if its <code>reference</code> attribute is set
+   * or if, for backwards compatibility, its name indicates a path. For example:
+   * <ul>
+   * <li><code>SERVICE/COMPONENT/identity_name</code></li>
+   * <li><code>/identity_name</code></li>
+   * <li><code>./identity_name</code></li>
+   * </ul>
+   *
+   * @return true if this {@link KerberosIdentityDescriptor} indicates a reference; otherwise false
+   */
+  public boolean isReference() {
+    String name = getName();
+    return !StringUtils.isEmpty(reference) ||
+        (!StringUtils.isEmpty(name) && (name.startsWith("/") || name.startsWith("./")));
+  }
+
+  /**
+   * Calculate the path to this identity descriptor for logging purposes.
+   * Examples:
+   * <ul>
+   * <li>/</li>
+   * <li>/SERVICE</li>
+   * <li>/SERVICE/COMPONENT</li>
+   * <li>/SERVICE/COMPONENT/identity_name</li>
+   * </ul>
+   * <p>
+   * This implementation calculates and caches the path if the path has not been previously set.
+   *
+   * @return a path
+   */
+  @Override
+  public String getPath() {
+    if (path == null) {
+      path = super.getPath();
+    }
+
+    return path;
+  }
+
+  /**
+   * Explicitly set the path to this {@link KerberosIdentityDescriptor}.
+   * <p>
+   * This is useful when creating detached identity descriptors while dereferencing identity references
+   * so that the path information is not lost.
+   *
+   * @param path a path
+   */
+  void setPath(String path) {
+    this.path = path;
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +
@@ -406,4 +507,63 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
       return false;
     }
   }
+
+  /**
+   * Find all of the {@link KerberosIdentityDescriptor}s that reference this {@link KerberosIdentityDescriptor}
+   *
+   * @return a list of {@link KerberosIdentityDescriptor}s
+   */
+  public List<KerberosIdentityDescriptor> findReferences() {
+    AbstractKerberosDescriptor root = getRoot();
+    if(root instanceof AbstractKerberosDescriptorContainer) {
+      return findIdentityReferences((AbstractKerberosDescriptorContainer)root, getPath());
+    }
+    else {
+      return null;
+    }
+  }
+
+  /**
+   * Given a root, recursively traverse the tree of {@link AbstractKerberosDescriptorContainer}s looking for
+   * {@link KerberosIdentityDescriptor}s that declare the given path as the referenced Kerberos identity.
+   *
+   * @param root the starting point
+   * @param path the path to the referenced {@link KerberosIdentityDescriptor} in the {@link KerberosDescriptor}
+   * @return a list of {@link KerberosIdentityDescriptor}s
+   */
+  private List<KerberosIdentityDescriptor> findIdentityReferences(AbstractKerberosDescriptorContainer root, String path) {
+    if (root == null) {
+      return null;
+    }
+
+    List<KerberosIdentityDescriptor> references = new ArrayList<>();
+
+    // Process the KerberosIdentityDescriptors found in this node.
+    List<KerberosIdentityDescriptor> identityDescriptors = root.getIdentities();
+    if (identityDescriptors != null) {
+      for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+        if (identityDescriptor.isReference()) {
+          String reference = identityDescriptor.getReferenceAbsolutePath();
+
+          if (!StringUtils.isEmpty(reference) && path.equals(reference)) {
+            references.add(identityDescriptor);
+          }
+        }
+      }
+    }
+
+    // Process the children of the node
+    Collection<? extends AbstractKerberosDescriptorContainer> children = root.getChildContainers();
+    if(!CollectionUtils.isEmpty(children)) {
+      for (AbstractKerberosDescriptorContainer child : children) {
+        Collection<KerberosIdentityDescriptor> childReferences = findIdentityReferences(child, path);
+        if (!CollectionUtils.isEmpty(childReferences)) {
+          // If references were found in the current child, add them to this node's list of references.
+          references.addAll(childReferences);
+        }
+      }
+    }
+
+    return references;
+  }
 }

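The relative-reference expansion in getReferenceAbsolutePath() above boils down to two rewrites, sketched below in a self-contained form (paths are hypothetical). Note that, as in the patch, String.replace rewrites every occurrence of the leading dots, so a reference with additional dots elsewhere in its name would also be rewritten.

final class ReferenceDemo {
  // "." expands to the parent container's path, ".." to the grandparent's;
  // absolute references (leading '/') and bare names pass through unchanged.
  static String resolve(String reference, String parentPath, String grandParentPath) {
    if (reference.startsWith("/")) {
      return reference;
    } else if (reference.startsWith("..")) {
      return reference.replace("..", grandParentPath);
    } else if (reference.startsWith(".")) {
      return reference.replace(".", parentPath);
    }
    return reference;
  }

  public static void main(String[] args) {
    // An identity under /SERVICE/COMPONENT referencing a sibling and an uncle:
    System.out.println(resolve("./peer", "/SERVICE/COMPONENT", "/SERVICE"));    // /SERVICE/COMPONENT/peer
    System.out.println(resolve("../shared", "/SERVICE/COMPONENT", "/SERVICE")); // /SERVICE/shared
  }
}
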
+ 10 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java

@@ -1224,6 +1224,16 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       LOG.error("Could not determine stale config", e);
     }
 
+    try {
+      Cluster cluster = clusters.getCluster(clusterName);
+      ServiceComponent serviceComponent = cluster.getService(serviceName).getServiceComponent(serviceComponentName);
+      ServiceComponentHost sch = serviceComponent.getServiceComponentHost(hostName);
+      String refreshConfigsCommand = helper.getRefreshConfigsCommand(cluster, sch);
+      r.setReloadConfig(refreshConfigsCommand != null);
+    } catch (Exception e) {
+      LOG.error("Could not determine reload config flag", e);
+    }
+
     return r;
   }
 
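In effect, the hunk above reduces to one rule: a host component is flagged as reloadable exactly when ConfigHelper can name a refresh command for it. A hedged, runnable sketch of that rule (the Supplier stands in for the getRefreshConfigsCommand(cluster, sch) lookup; exception handling is omitted):

import java.util.function.Supplier;

final class ReloadFlagDemo {
  // A non-null refresh command means the stale config can be applied in place;
  // null means only a restart can apply the change.
  static boolean isReloadable(Supplier<String> refreshConfigsCommandLookup) {
    return refreshConfigsCommandLookup.get() != null;
  }

  public static void main(String[] args) {
    System.out.println(isReloadable(() -> "reload_configs")); // true  -> reload flag set
    System.out.println(isReloadable(() -> null));             // false -> restart required
  }
}
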

+ 12 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml

@@ -185,4 +185,16 @@ DEFAULT
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>hadoop.proxyuser.*</name>
+    <value/>
+    <description>
+      This * property is not configured itself; it is used only to define refresh commands for all
+      properties prefixed with hadoop.proxyuser.
+    </description>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reloadproxyusers" />
+    </supported-refresh-commands>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>

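The wildcard entry above implies prefix matching when changed properties are mapped to refresh commands. A short Java sketch of that intent (illustrative only, not the patch's actual lookup code):

final class WildcardDemo {
  // A trailing "*" in a configuration key is treated as a prefix wildcard.
  static boolean matches(String pattern, String changedProperty) {
    if (pattern.endsWith("*")) {
      return changedProperty.startsWith(pattern.substring(0, pattern.length() - 1));
    }
    return changedProperty.equals(pattern);
  }

  public static void main(String[] args) {
    System.out.println(matches("hadoop.proxyuser.*", "hadoop.proxyuser.hdfs.hosts"));   // true
    System.out.println(matches("hadoop.proxyuser.*", "hadoop.security.auth_to_local")); // false
  }
}
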
+ 3 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml

@@ -184,6 +184,9 @@
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
     <on-ambari-upgrade add="true"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>

+ 12 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py

@@ -31,7 +31,7 @@ from resource_management.libraries.functions.decorator import retry
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
@@ -57,6 +57,17 @@ class DataNode(Script):
     hdfs("datanode")
     datanode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("datanode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("datanode", params.dfs_dn_ipc_address)
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 57 - 5
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py

@@ -20,11 +20,16 @@ Ambari Agent
 """
 
 from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
 from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +143,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params
@@ -155,6 +161,52 @@ def install_snappy():
        to=params.so_src_x64,
   )
 
+class ConfigStatusParser():
+    def __init__(self):
+        self.reconfig_successful = False
+
+    def handle_new_line(self, line, is_stderr):
+        if is_stderr:
+            return
+
+        if line.startswith('SUCCESS: Changed property'):
+            self.reconfig_successful = True
+
+        Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+    import params
+
+    if params.security_enabled:
+        Execute(params.nn_kinit_cmd,
+                user=params.hdfs_user
+                )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=True,
+             path=params.hadoop_bin_dir
+             )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+    config_status_parser = ConfigStatusParser()
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=False,
+             path=params.hadoop_bin_dir,
+             on_new_line=config_status_parser.handle_new_line
+             )
+
+
+    if not config_status_parser.reconfig_successful:
+        Logger.error('Reconfiguration failed')
+        raise Fail('Reconfiguration failed!')
+
+    Logger.info('Reconfiguration successfully completed.')
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def hdfs(component=None):
   import params

+ 5 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py

@@ -42,6 +42,11 @@ class HdfsClient(Script):
     env.set_params(params)
     hdfs()
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 21 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py

@@ -430,6 +430,27 @@ def is_namenode_formatted(params):
 
   return False
 
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+  import params
+
+  if params.security_enabled:
+    Execute(params.nn_kinit_cmd,
+            user=params.hdfs_user
+            )
+
+  if params.dfs_ha_enabled:
+    # due to a bug in HDFS, the refresh will not run on both NameNodes, so we
+    # need to execute the command scoped to a particular NameNode
+    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+  else:
+    nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+  ExecuteHadoop(nn_refresh_cmd,
+                user=params.hdfs_user,
+                conf_dir=params.hadoop_conf_dir,
+                bin_dir=params.hadoop_bin_dir)
+
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def decommission():
   import params

+ 0 - 6
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/install_params.py

@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

+ 19 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py

@@ -46,8 +46,8 @@ from ambari_commons import OSConst
 
 
 import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
 import hdfs_rebalance
 from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
 
@@ -86,6 +86,23 @@ class NameNode(Script):
     hdfs_binary = self.get_hdfs_binary()
     namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("namenode", params.namenode_address)
+
+  def reloadproxyusers(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD HDFS PROXY USERS")
+    refreshProxyUsers()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 0 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -389,7 +388,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

+ 10 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py

@@ -44,6 +44,16 @@ class SNameNode(Script):
     hdfs("secondarynamenode")
     snamenode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("secondarynamenode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 6 - 0
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml

@@ -181,6 +181,9 @@
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
@@ -637,5 +640,8 @@
     <name>hadoop.caller.context.enabled</name>
     <value>true</value>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
 </configuration>

+ 12 - 1
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py

@@ -25,7 +25,7 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
-from hdfs import hdfs
+from hdfs import hdfs, reconfig
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
@@ -50,6 +50,17 @@ class DataNode(Script):
     hdfs("datanode")
     datanode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("datanode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("datanode", params.dfs_dn_ipc_address)
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 57 - 5
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py

@@ -20,11 +20,16 @@ Ambari Agent
 """
 
 from resource_management.libraries.script.script import Script
-from resource_management.core.resources.system import Directory, File, Link
+from resource_management.core.resources.system import Execute, Directory, File, Link
 from resource_management.core.resources import Package
 from resource_management.core.source import Template
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
 import os
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
@@ -138,10 +143,11 @@ def hdfs(name=None):
        content=Template("slaves.j2")
   )
   
-  if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages,
-              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-              retry_count=params.agent_stack_retry_count)
+  if params.lzo_enabled:
+    lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params
@@ -155,6 +161,52 @@ def install_snappy():
        to=params.so_src_x64,
   )
 
+class ConfigStatusParser():
+    def __init__(self):
+        self.reconfig_successful = False
+
+    def handle_new_line(self, line, is_stderr):
+        if is_stderr:
+            return
+
+        if line.startswith('SUCCESS: Changed property'):
+            self.reconfig_successful = True
+
+        Logger.info('[reconfig] %s' % (line))
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def reconfig(componentName, componentAddress):
+    import params
+
+    if params.security_enabled:
+        Execute(params.nn_kinit_cmd,
+                user=params.hdfs_user
+                )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} start')
+
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=True,
+             path=params.hadoop_bin_dir
+             )
+
+    nn_reconfig_cmd = format('hdfs --config {hadoop_conf_dir} dfsadmin -reconfig {componentName} {componentAddress} status')
+    config_status_parser = ConfigStatusParser()
+    Execute (nn_reconfig_cmd,
+             user=params.hdfs_user,
+             logoutput=False,
+             path=params.hadoop_bin_dir,
+             on_new_line=config_status_parser.handle_new_line
+             )
+
+
+    if not config_status_parser.reconfig_successful:
+        Logger.error('Reconfiguration failed')
+        raise Fail('Reconfiguration failed!')
+
+    Logger.info('Reconfiguration successfully completed.')
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def hdfs(component=None):
   import params

+ 5 - 0
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py

@@ -42,6 +42,11 @@ class HdfsClient(Script):
     env.set_params(params)
     hdfs()
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 20 - 0
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py

@@ -460,6 +460,26 @@ def decommission():
                   conf_dir=conf_dir,
                   bin_dir=params.hadoop_bin_dir)
 
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def refreshProxyUsers():
+  import params
+
+  if params.security_enabled:
+    Execute(params.nn_kinit_cmd,
+            user=params.hdfs_user
+            )
+
+  if params.dfs_ha_enabled:
+    # due to a bug in HDFS, the refresh will not run on both NameNodes, so we
+    # need to execute the command scoped to a particular NameNode
+    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshSuperUserGroupsConfiguration')
+  else:
+    nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshSuperUserGroupsConfiguration')
+  ExecuteHadoop(nn_refresh_cmd,
+                user=params.hdfs_user,
+                conf_dir=params.hadoop_conf_dir,
+                bin_dir=params.hadoop_bin_dir)
+
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def decommission():
   import params

+ 0 - 6
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/install_params.py

@@ -23,7 +23,6 @@ if OSCheck.is_windows_family():
   exclude_packages = []
 else:
   from resource_management.libraries.functions.default import default
-  from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
   from resource_management.libraries.script.script import Script
 
   _config = Script.get_config()
@@ -32,8 +31,3 @@ else:
   # The logic for LZO also exists in OOZIE's params.py
   io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
   lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  lzo_packages = get_lzo_packages(stack_version_unformatted)
-
-  exclude_packages = []
-  if not lzo_enabled:
-    exclude_packages += lzo_packages

+ 19 - 2
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py

@@ -46,8 +46,8 @@ from ambari_commons import OSConst
 
 
 import namenode_upgrade
-from hdfs_namenode import namenode, wait_for_safemode_off
-from hdfs import hdfs
+from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers
+from hdfs import hdfs, reconfig
 import hdfs_rebalance
 from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
 
@@ -86,6 +86,23 @@ class NameNode(Script):
     hdfs_binary = self.get_hdfs_binary()
     namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+    reconfig("namenode", params.namenode_address)
+
+  def reloadproxyusers(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD HDFS PROXY USERS")
+    refreshProxyUsers()
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 0 - 2
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py

@@ -40,7 +40,6 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
@@ -378,7 +377,6 @@ HdfsResource = functools.partial(
 # The logic for LZO also exists in OOZIE's params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
   
 name_node_params = default("/commandParams/namenode", None)
 

+ 10 - 0
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py

@@ -44,6 +44,16 @@ class SNameNode(Script):
     hdfs("secondarynamenode")
     snamenode(action="configure")
 
+  def save_configs(self, env):
+    import params
+    env.set_params(params)
+    hdfs("secondarynamenode")
+
+  def reload_configs(self, env):
+    import params
+    env.set_params(params)
+    Logger.info("RELOAD CONFIGS")
+
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)

+ 4 - 2
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py

@@ -37,6 +37,7 @@ from resource_management.libraries.functions.copy_tarball import get_current_ver
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.core.resources.packaging import Package
 from resource_management.core.shell import as_user, as_sudo, call, checked_call
 from resource_management.core.exceptions import Fail
@@ -305,8 +306,9 @@ def oozie_server_specific(upgrade_type):
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

+ 0 - 3
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -30,7 +30,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -388,5 +387,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)

+ 3 - 2
ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py

@@ -275,8 +275,9 @@ def oozie_server_specific():
     Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
       not_if  = no_op_test)
 
-  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages,
+  if params.lzo_enabled:
+    all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
+    Package(all_lzo_packages,
             retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
             retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),

+ 0 - 3
ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py

@@ -28,7 +28,6 @@ from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.get_architecture import get_architecture
@@ -370,5 +369,3 @@ is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
 lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-all_lzo_packages = get_lzo_packages(stack_version_unformatted)

+ 21 - 12
ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/master.py

@@ -192,18 +192,8 @@ class Master(Script):
       notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + \
                            params.config['configurations']['zeppelin-config']['zeppelin.notebook.dir']
 
-    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
-
-    notebook_directory_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {notebook_directory};echo $?"),
-                                           user=params.zeppelin_user)[1]
-
-    #if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
-    if "\n" in notebook_directory_exists:
-      notebook_directory_exists = notebook_directory_exists.split("\n")[1]
 
-    # '1' means it does not exists
-    if notebook_directory_exists == '1':
+    if self.is_path_exists_in_HDFS(notebook_directory, params.zeppelin_user):
       # hdfs dfs -mkdir {notebook_directory}
       params.HdfsResource(format("{notebook_directory}"),
                           type="directory",
@@ -310,6 +300,22 @@ class Master(Script):
 
     return hdfs_interpreter_config
 
+  def is_path_exists_in_HDFS(self, path, as_user):
+    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+    path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -e {path};echo $?"),
+                             user=as_user)[1]
+
+    # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
+    if "\n" in path_exists:
+      path_exists = path_exists.split("\n")[1]
+
+    # '0' means the path exists; '1' means it does not exist
+    if path_exists == '0':
+      return True
+    else:
+      return False
+
   def get_interpreter_settings(self):
     import params
     import json
@@ -320,12 +326,14 @@ class Master(Script):
 
       if 'zeppelin.config.fs.dir' in params.config['configurations']['zeppelin-config']:
         zeppelin_conf_fs = self.getZeppelinConfFS(params)
-        if os.path.exists(zeppelin_conf_fs):
+
+        if self.is_path_exists_in_HDFS(zeppelin_conf_fs, params.zeppelin_user):
           # copy from hdfs to /etc/zeppelin/conf/interpreter.json
           params.HdfsResource(interpreter_config,
                               type="file",
                               action="download_on_execute",
                               source=zeppelin_conf_fs,
+                              user=params.zeppelin_user,
                               group=params.zeppelin_group,
                               owner=params.zeppelin_user)
         else:
@@ -353,6 +361,7 @@ class Master(Script):
                             type="file",
                             action="create_on_execute",
                             source=interpreter_config,
+                            user=params.zeppelin_user,
                             group=params.zeppelin_group,
                             owner=params.zeppelin_user,
                             replace_existing_files=True)

+ 12 - 0
ambari-server/src/main/resources/configuration-schema.xsd

@@ -41,6 +41,13 @@
       <xs:element name="deleted" type="xs:boolean" minOccurs="0"/>
       <xs:element name="final" type="xs:boolean" minOccurs="0"/>
       <xs:element name="on-ambari-upgrade" type="propertyUpgradeBehavior" minOccurs="1"/>
+      <xs:element name="supported-refresh-commands" minOccurs="0">
+        <xs:complexType>
+          <xs:sequence>
+            <xs:element name="refresh-command" type="refreshCommands" minOccurs="1" maxOccurs="unbounded"/>
+          </xs:sequence>
+        </xs:complexType>
+      </xs:element>
       <xs:element name="on-stack-upgrade" type="propertyStackUpgradeBehavior" minOccurs="0"/>
       <xs:element name="property-type" minOccurs="0">
         <xs:simpleType>
@@ -84,6 +91,11 @@
     <xs:attribute name="merge" type="xs:boolean" use="optional" default="true"/>
   </xs:complexType>
 
+  <xs:complexType name="refreshCommands">
+    <xs:attribute name="componentName" type="xs:string" use="required"/>
+    <xs:attribute name="command" type="xs:string" use="optional"/>
+  </xs:complexType>
+
   <xs:complexType name="valueAttributesInfo">
     <xs:all>
       <xs:element name="type" type="xs:string" minOccurs="0"/>

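For reference, the new refreshCommands type declared above binds naturally to a pair of attributes. A minimal JAXB-style sketch, runnable on Java 8 (illustrative only, not the project's actual class):

import java.io.StringReader;
import javax.xml.bind.JAXB;
import javax.xml.bind.annotation.*;

@XmlRootElement(name = "refresh-command")
@XmlAccessorType(XmlAccessType.FIELD)
class RefreshCommandSketch {
  @XmlAttribute(name = "componentName", required = true) // required by the XSD
  String componentName;
  @XmlAttribute(name = "command")                        // optional per the XSD
  String command;

  public static void main(String[] args) {
    RefreshCommandSketch rc = JAXB.unmarshal(
        new StringReader("<refresh-command componentName=\"NAMENODE\" command=\"reload_configs\"/>"),
        RefreshCommandSketch.class);
    System.out.println(rc.componentName + " -> " + rc.command); // NAMENODE -> reload_configs
  }
}
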
+ 1 - 0
ambari-server/src/main/resources/properties.json

@@ -57,6 +57,7 @@
         "HostRoles/actual_configs",
         "params/run_smoke_test",
         "HostRoles/stale_configs",
+        "HostRoles/reload_configs",
         "HostRoles/desired_admin_state",
         "HostRoles/maintenance_state",
         "HostRoles/service_id",

+ 3 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hdfs-site.xml

@@ -76,5 +76,8 @@
     <name>hadoop.caller.context.enabled</name>
     <value>true</value>
     <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
   </property>
 </configuration>

+ 0 - 30
ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json

@@ -34,27 +34,6 @@
             "configuration": "druid-common/druid.hadoop.security.kerberos.keytab"
           }
         },
-        {
-          "name": "superset",
-          "principal": {
-            "value": "${druid-env/druid_user}@${realm}",
-            "type": "user",
-            "configuration": "druid-superset/KERBEROS_PRINCIPAL",
-            "local_username": "${druid-env/druid_user}"
-          },
-          "keytab": {
-            "file": "${keytab_dir}/superset.headless.keytab",
-            "owner": {
-              "name": "${druid-env/druid_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": "r"
-            },
-            "configuration": "druid-superset/KERBEROS_KEYTAB"
-          }
-        },
         {
           "name": "druid_smokeuser",
           "reference": "/smokeuser"
@@ -105,15 +84,6 @@
               "reference": "/druid"
             }
           ]
-        },
-        {
-          "name": "DRUID_SUPERSET",
-          "identities": [
-            {
-              "name": "druid_druid_superset_druid",
-              "reference": "/druid"
-            }
-          ]
         }
       ],
       "configurations": [

+ 0 - 5
ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java

@@ -1443,11 +1443,6 @@ public class KerberosHelperTest extends EasyMockSupport {
         .andReturn(Collections.singletonList(schKerberosClient))
         .once();
 
-    final Clusters clusters = injector.getInstance(Clusters.class);
-    expect(clusters.getHost("host1"))
-        .andReturn(host)
-        .once();
-
     final AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     expect(ambariManagementController.findConfigurationTagsWithOverrides(cluster, null))
         .andReturn(Collections.emptyMap())

+ 32 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java

@@ -88,6 +88,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 /**
  * BlueprintConfigurationProcessor unit tests.
@@ -7933,6 +7934,37 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     assertEquals(someString, metricsReporterRegister);
   }
 
+  @Test
+  public void druidProperties() throws Exception {
+    Map<String, Map<String, String>> properties = new HashMap<>();
+    Map<String, String> druidCommon = new HashMap<>();
+    String connectUriKey = "druid.metadata.storage.connector.connectURI";
+    String metastoreHostnameKey = "metastore_hostname";
+    String connectUriTemplate = "jdbc:mysql://%s:3306/druid?createDatabaseIfNotExist=true";
+    druidCommon.put(connectUriKey, String.format(connectUriTemplate, "%HOSTGROUP::group1%"));
+    druidCommon.put(metastoreHostnameKey, "%HOSTGROUP::group1%");
+    properties.put("druid-common", druidCommon);
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+
+    Collection<String> hgComponents1 = Sets.newHashSet("DRUID_COORDINATOR");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
+
+    Collection<String> hgComponents2 = Sets.newHashSet("DRUID_BROKER", "DRUID_OVERLORD", "DRUID_ROUTER");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("host2"));
+
+    Collection<TestHostGroup> hostGroups = Arrays.asList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    configProcessor.doUpdateForClusterCreate();
+
+    assertEquals(String.format(connectUriTemplate, "host1"), clusterConfig.getPropertyValue("druid-common", connectUriKey));
+    assertEquals("host1", clusterConfig.getPropertyValue("druid-common", metastoreHostnameKey));
+  }
 
   @Test
   public void testAmsPropertiesDefault() throws Exception {

+ 4 - 4
ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java

@@ -77,7 +77,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Test
   public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1", "oozie_server2"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1", "/OOZIE/OOZIE_SERVER/oozie_server2"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -95,7 +95,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
     installComponent(OOZIE_2, OOZIE_SERVER_2, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("oozie_server1"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, OOZIE, OOZIE_SERVER)), newHashSet("/OOZIE/OOZIE_SERVER/oozie_server1"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
@@ -106,7 +106,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
     installComponent(YARN, RESOURCE_MANAGER, HOST);
     installComponent(YARN_2, RESOURCE_MANAGER_2, HOST);
-    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("rm_unique"));
+    kerberosHelper.deleteIdentities(cluster, singletonList(new Component(HOST, YARN, RESOURCE_MANAGER)), newHashSet("/YARN/RESOURCE_MANAGER/rm_unique"));
     expectLastCall().once();
     replayAll();
     uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
@@ -133,7 +133,7 @@ public class KerberosIdentityCleanerTest extends EasyMockSupport {
   @Test
   public void removesServiceIdentitiesSkipComponentIdentitiesAfterServiceWasUninstalled() throws Exception {
     installComponent(OOZIE, OOZIE_SERVER, HOST);
-    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("hdfs-service"));
+    kerberosHelper.deleteIdentities(cluster, hdfsComponents(), newHashSet("/HDFS/hdfs-service"));
     expectLastCall().once();
     replayAll();
     uninstallService(HDFS, hdfsComponents());

+ 7 - 7
ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java

@@ -19,6 +19,7 @@
 package org.apache.ambari.server.events.listeners.upgrade;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -335,12 +336,12 @@ public class HostVersionOutOfSyncListenerTest {
 
     for (HostVersionEntity hostVersionEntity : hostVersions) {
       RepositoryVersionEntity repoVersion = hostVersionEntity.getRepositoryVersion();
-      if (repoVersion.getVersion().equals(INSTALLED_VERSION) || repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
-        if (changedHosts.contains(hostVersionEntity.getHostName())) {
-          assertEquals(hostVersionEntity.getState(), RepositoryVersionState.OUT_OF_SYNC);
-        } else {
-          assertEquals(hostVersionEntity.getState(), RepositoryVersionState.INSTALLED);
-        }
+
+      if (repoVersion.getVersion().equals(INSTALLED_VERSION_2)) {
+        assertEquals(RepositoryVersionState.INSTALLED, hostVersionEntity.getState());
+      } else if (repoVersion.getVersion().equals(INSTALLED_VERSION)) {
+        assertTrue(changedHosts.contains(hostVersionEntity.getHostName()));
+        assertEquals(RepositoryVersionState.OUT_OF_SYNC, hostVersionEntity.getState());
       }
     }
   }
@@ -598,6 +599,5 @@ public class HostVersionOutOfSyncListenerTest {
         }
       }
     }
-
   }
 }

+ 28 - 3
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java

@@ -69,6 +69,9 @@ public class StackManagerExtensionTest  {
     StackEntity stack3 = new StackEntity();
     stack3.setStackName("HDP");
     stack3.setStackVersion("0.3");
+    StackEntity stack4 = new StackEntity();
+    stack4.setStackName("HDP");
+    stack4.setStackVersion("0.4");
     ExtensionEntity extension1 = new ExtensionEntity();
     extension1.setExtensionName("EXT");
     extension1.setExtensionVersion("0.1");
@@ -78,19 +81,28 @@ public class StackManagerExtensionTest  {
     ExtensionEntity extension3 = new ExtensionEntity();
     extension3.setExtensionName("EXT");
     extension3.setExtensionVersion("0.3");
+    ExtensionLinkEntity link1 = new ExtensionLinkEntity();
+    link1.setLinkId(new Long(-1));
+    link1.setStack(stack1);
+    link1.setExtension(extension1);
     List<ExtensionLinkEntity> list = new ArrayList<>();
+    List<ExtensionLinkEntity> linkList = new ArrayList<>();
+    linkList.add(link1);
 
     expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
     expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
     expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(stackDao.find("HDP", "0.4")).andReturn(stack3).atLeastOnce();
     expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
     expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
     expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
 
+    expect(linkDao.findByStack("HDP", "0.1")).andReturn(linkList).atLeastOnce();
     expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", "0.1")).andReturn(link1).atLeastOnce();
 
     replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
 
@@ -146,21 +158,34 @@ public class StackManagerExtensionTest  {
     assertNotNull(themes);
     assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
 
-    StackInfo stack = stackManager.getStack("HDP", "0.2");
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
     assertNotNull(stack.getService("OOZIE2"));
     oozie = stack.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
-    assertEquals(oozie.getVersion(), "4.0.0");
+    assertEquals(oozie.getVersion(), "3.2.0");
 
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
+
+    stack = stackManager.getStack("HDP", "0.2");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);
 
     stack = stackManager.getStack("HDP", "0.3");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
+    assertNotNull(extension.getService("OOZIE2"));
+    oozie = extension.getService("OOZIE2");
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.4");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
     assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }

+ 74 - 2
ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java

@@ -158,6 +158,7 @@ public class ConfigHelperTest {
 
       cluster.addService("FLUME", repositoryVersion);
       cluster.addService("OOZIE", repositoryVersion);
+      cluster.addService("HDFS", repositoryVersion);
 
       final ClusterRequest clusterRequest2 =
           new ClusterRequest(cluster.getClusterId(), clusterName,
@@ -230,6 +231,45 @@ public class ConfigHelperTest {
       managementController.updateClusters(new HashSet<ClusterRequest>() {{
         add(clusterRequest5);
       }}, null);
+
+      // hdfs-site/hadoop.caller.context.enabled
+      ConfigurationRequest cr6 = new ConfigurationRequest();
+      cr6.setClusterName(clusterName);
+      cr6.setType("hdfs-site");
+      cr6.setVersionTag("version1");
+      cr6.setProperties(new HashMap<String, String>() {{
+        put("hadoop.caller.context.enabled", "true");
+      }});
+      cr6.setPropertiesAttributes(null);
+
+      final ClusterRequest clusterRequest6 =
+              new ClusterRequest(cluster.getClusterId(), clusterName,
+                      cluster.getDesiredStackVersion().getStackVersion(), null);
+
+      clusterRequest6.setDesiredConfig(Collections.singletonList(cr6));
+      managementController.updateClusters(new HashSet<ClusterRequest>() {{
+        add(clusterRequest6);
+      }}, null);
+
+      // hdfs-site/hadoop.caller.context.enabled
+      ConfigurationRequest cr7 = new ConfigurationRequest();
+      cr7.setClusterName(clusterName);
+      cr7.setType("hdfs-site");
+      cr7.setVersionTag("version2");
+      cr7.setProperties(new HashMap<String, String>() {{
+        put("hadoop.caller.context.enabled", "false");
+      }});
+      cr7.setPropertiesAttributes(null);
+
+      final ClusterRequest clusterRequest7 =
+              new ClusterRequest(cluster.getClusterId(), clusterName,
+                      cluster.getDesiredStackVersion().getStackVersion(), null);
+
+      clusterRequest7.setDesiredConfig(Collections.singletonList(cr7));
+      managementController.updateClusters(new HashSet<ClusterRequest>() {{
+        add(clusterRequest7);
+      }}, null);
+
     }
 
     @After
@@ -546,7 +586,7 @@ public class ConfigHelperTest {
               configHelper.getEffectiveDesiredTags(cluster, "h3"));
 
       Assert.assertNotNull(effectiveAttributes);
-      Assert.assertEquals(7, effectiveAttributes.size());
+      Assert.assertEquals(8, effectiveAttributes.size());
 
       Assert.assertTrue(effectiveAttributes.containsKey("global3"));
       Map<String, Map<String, String>> globalAttrs = effectiveAttributes.get("global3");
@@ -992,7 +1032,39 @@ public class ConfigHelperTest {
       Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
 
       verify(sch);
-    }
+  }
+
+  @Test
+  public void testCalculateRefreshCommands() throws Exception {
+
+    Map<String, HostConfig> schReturn = new HashMap<>();
+    HostConfig hc = new HostConfig();
+    // Put a different version to check for change
+    hc.setDefaultVersionTag("version1");
+    schReturn.put("hdfs-site", hc);
+
+    ServiceComponent sc = createNiceMock(ServiceComponent.class);
+
+    // set up mocks
+    ServiceComponentHost sch = createNiceMock(ServiceComponentHost.class);
+    expect(sc.getDesiredStackId()).andReturn(cluster.getDesiredStackVersion()).anyTimes();
+
+    // set up expectations
+    expect(sch.getActualConfigs()).andReturn(schReturn).anyTimes();
+    expect(sch.getHostName()).andReturn("h1").anyTimes();
+    expect(sch.getClusterId()).andReturn(cluster.getClusterId()).anyTimes();
+    expect(sch.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(sch.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+    expect(sch.getServiceComponent()).andReturn(sc).anyTimes();
+
+    replay(sc, sch);
+
+    Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
+    String refreshConfigsCommand = configHelper.getRefreshConfigsCommand(cluster, sch);
+    Assert.assertEquals("reload_configs", refreshConfigsCommand);
+    verify(sch);
+  }
+
   }
 
   public static class RunWithCustomModule {

+ 20 - 0
ambari-server/src/test/java/org/apache/ambari/server/state/PropertyInfoTest.java

@@ -136,6 +136,26 @@ public class PropertyInfoTest {
     assertFalse(propertyInfo.getPropertyAmbariUpgradeBehavior().isDelete());
   }
 
+  @Test
+  public void testBehaviorWithSupportedRefreshCommandsTags() throws JAXBException {
+    // given
+    String xml =
+    "<property>\n" +
+    " <name>prop_name</name>\n" +
+    " <value>prop_val</value>\n" +
+    " <supported-refresh-commands>\n" +
+    "   <refresh-command componentName=\"NAMENODE\" command=\"reload_configs\" />\n" +
+    " </supported-refresh-commands>\n" +
+    "</property>";
+
+    // when
+    PropertyInfo propertyInfo = propertyInfoFrom(xml);
+
+    // then
+    assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getCommand(), "reload_configs");
+    assertEquals(propertyInfo.getSupportedRefreshCommands().iterator().next().getComponentName(), "NAMENODE");
+  }
+
   @Test
   public void testUnknownPropertyType() throws Exception {
     // Given

+ 58 - 2
ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java

@@ -23,6 +23,7 @@ import static junit.framework.Assert.assertTrue;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -110,6 +111,11 @@ public class UpgradeContextTest extends EasyMockSupport {
   @Mock
   private VersionDefinitionXml m_vdfXml;
 
+  /**
+   * The upgrade history to return for the completed upgrade.
+   */
+  private List<UpgradeHistoryEntity> m_upgradeHistory = new ArrayList<>();
+
   /**
    * The cluster services.
    */
@@ -128,7 +134,7 @@ public class UpgradeContextTest extends EasyMockSupport {
     expect(upgradeHistoryEntity.getServiceName()).andReturn(HDFS_SERVICE_NAME).anyTimes();
     expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
     expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
-    List<UpgradeHistoryEntity> upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
+    m_upgradeHistory = Lists.newArrayList(upgradeHistoryEntity);
 
     expect(m_repositoryVersionDAO.findByPK(1L)).andReturn(m_sourceRepositoryVersion).anyTimes();
     expect(m_repositoryVersionDAO.findByPK(99L)).andReturn(m_targetRepositoryVersion).anyTimes();
@@ -143,12 +149,13 @@ public class UpgradeContextTest extends EasyMockSupport {
     expect(m_completedRevertableUpgrade.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
     expect(m_completedRevertableUpgrade.getRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
     expect(m_completedRevertableUpgrade.getOrchestration()).andReturn(RepositoryType.PATCH).anyTimes();
-    expect(m_completedRevertableUpgrade.getHistory()).andReturn(upgradeHistory).anyTimes();
+    expect(m_completedRevertableUpgrade.getHistory()).andReturn(m_upgradeHistory).anyTimes();
     expect(m_completedRevertableUpgrade.getUpgradePackage()).andReturn(null).anyTimes();
 
     RepositoryVersionEntity hdfsRepositoryVersion = createNiceMock(RepositoryVersionEntity.class);
 
     expect(m_hdfsService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
+    expect(m_zookeeperService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
     expect(m_cluster.getService(HDFS_SERVICE_NAME)).andReturn(m_hdfsService).anyTimes();
     m_services.put(HDFS_SERVICE_NAME, m_hdfsService);
 
@@ -330,6 +337,55 @@ public class UpgradeContextTest extends EasyMockSupport {
     verifyAll();
   }
 
+  /**
+   * Tests that the {@link UpgradeContext} for a reversion has the correct
+   * services included in the reversion if one of the services in the original
+   * upgrade has since been deleted.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRevertWithDeletedService() throws Exception {
+    UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+    // give the completed upgrade 2 services which can be reverted
+    UpgradeHistoryEntity upgradeHistoryEntity = createNiceMock(UpgradeHistoryEntity.class);
+    expect(upgradeHistoryEntity.getServiceName()).andReturn(ZOOKEEPER_SERVICE_NAME).anyTimes();
+    expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
+    expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
+    m_upgradeHistory.add(upgradeHistoryEntity);
+
+    expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), EasyMock.anyObject(StackId.class),
+        EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+        EasyMock.anyObject(UpgradeType.class), EasyMock.anyString())).andReturn(upgradePack).once();
+
+    expect(m_upgradeDAO.findRevertable(1L)).andReturn(m_completedRevertableUpgrade).once();
+
+    // remove HDFS, add ZK
+    m_services.remove(HDFS_SERVICE_NAME);
+    expect(m_cluster.getService(ZOOKEEPER_SERVICE_NAME)).andReturn(m_zookeeperService).anyTimes();
+    m_services.put(ZOOKEEPER_SERVICE_NAME, m_zookeeperService);
+    assertEquals(1, m_services.size());
+
+    Map<String, Object> requestMap = new HashMap<>();
+    requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_REVERT_UPGRADE_ID, "1");
+
+    replayAll();
+
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+    assertEquals(Direction.DOWNGRADE, context.getDirection());
+    assertEquals(RepositoryType.PATCH, context.getOrchestrationType());
+    assertEquals(1, context.getSupportedServices().size());
+    assertTrue(context.isPatchRevert());
+
+    verifyAll();
+  }
+
   /**
    * Tests that if a different {@link UpgradeEntity} is returned instead of the one
    * specified by the

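The behavior pinned down by testRevertWithDeletedService reduces to filtering the completed upgrade's history against the services currently installed. A hedged sketch of that filtering (the real UpgradeContext constructor performs more bookkeeping):

    // Only services that still exist in the cluster take part in the revert;
    // in the test, HDFS was deleted, so only ZOOKEEPER is kept.
    Set<String> supportedServices = new HashSet<>();
    for (UpgradeHistoryEntity history : completedUpgrade.getHistory()) {
      String serviceName = history.getServiceName();
      if (cluster.getServices().containsKey(serviceName)) {
        supportedServices.add(serviceName);
      }
    }
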
+ 145 - 5
ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorTest.java

@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -79,6 +80,9 @@ public class KerberosDescriptorTest {
           "        {" +
           "          \"name\": \"service1_spnego\"," +
           "          \"reference\": \"/spnego\"" +
+          "        }," +
+          "        {" +
+          "          \"name\": \"service1_identity\"" +
           "        }" +
           "      ]," +
           "      \"name\": \"SERVICE1\"" +
@@ -87,6 +91,39 @@ public class KerberosDescriptorTest {
           "      \"identities\": [" +
           "        {" +
           "          \"name\": \"/spnego\"" +
+          "        }," +
+          "        {" +
+          "          \"name\": \"service2_identity\"" +
+          "        }" +
+          "      ]," +
+          "      \"components\": [" +
+          "        {" +
+          "          \"identities\": [" +
+          "            {" +
+          "              \"name\": \"component1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_service1_identity\"," +
+          "              \"reference\": \"/SERVICE1/service1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_component1_identity\"," +
+          "              \"reference\": \"./component1_identity\"" +
+          "            }," +
+          "            {" +
+          "              \"name\": \"service2_component1_service2_identity\"," +
+          "              \"reference\": \"../service2_identity\"" +
+          "            }" +
+          "          ]," +
+          "          \"name\": \"COMPONENT21\"" +
+          "        }," +
+          "        {" +
+          "          \"identities\": [" +
+          "            {" +
+          "              \"name\": \"component2_identity\"" +
+          "            }" +
+          "          ]," +
+          "          \"name\": \"COMPONENT22\"" +
           "        }" +
           "      ]," +
           "      \"name\": \"SERVICE2\"" +
@@ -547,15 +584,118 @@ public class KerberosDescriptorTest {
     // Reference is determined using the "reference" attribute
     serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
     identities = serviceDescriptor.getIdentities(true, null);
-    Assert.assertEquals(1, identities.size());
-    Assert.assertEquals("service1_spnego", identities.get(0).getName());
-    Assert.assertEquals("/spnego", identities.get(0).getReference());
+    Assert.assertEquals(2, identities.size());
+    for (KerberosIdentityDescriptor identity : identities) {
+      if (identity.isReference()) {
+        Assert.assertEquals("service1_spnego", identity.getName());
+        Assert.assertEquals("/spnego", identity.getReference());
+      } else {
+        Assert.assertEquals("service1_identity", identity.getName());
+        Assert.assertNull(identity.getReference());
+      }
+    }
+
+    Assert.assertEquals("service1_identity", identities.get(1).getName());
+    Assert.assertNull(identities.get(1).getReference());
 
     // Reference is determined using the "name" attribute
     serviceDescriptor = kerberosDescriptor.getService("SERVICE2");
     identities = serviceDescriptor.getIdentities(true, null);
+    Assert.assertEquals(2, identities.size());
+    for (KerberosIdentityDescriptor identity : identities) {
+      if (identity.isReference()) {
+        Assert.assertEquals("/spnego", identity.getName());
+        Assert.assertNull(identity.getReference());
+      } else {
+        Assert.assertEquals("service2_identity", identity.getName());
+        Assert.assertNull(identity.getReference());
+      }
+    }
+  }
+
+  @Test
+  public void testGetPath() throws Exception {
+    KerberosDescriptor kerberosDescriptor;
+    KerberosServiceDescriptor serviceDescriptor;
+    List<KerberosIdentityDescriptor> identities;
+
+    kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE);
+
+    serviceDescriptor = kerberosDescriptor.getService("SERVICE_NAME");
+    identities = serviceDescriptor.getIdentities(false, null);
+    Assert.assertEquals(1, identities.size());
+    Assert.assertEquals("/SERVICE_NAME/identity_1", identities.get(0).getPath());
+
+    KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent("COMPONENT_NAME");
+    identities = componentDescriptor.getIdentities(false, null);
     Assert.assertEquals(1, identities.size());
-    Assert.assertEquals("/spnego", identities.get(0).getName());
-    Assert.assertNull(identities.get(0).getReference());
+    Assert.assertEquals("/SERVICE_NAME/COMPONENT_NAME/identity_1", identities.get(0).getPath());
+
+
+    kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+
+    serviceDescriptor = kerberosDescriptor.getService("SERVICE1");
+    identities = serviceDescriptor.getIdentities(true, null);
+    Assert.assertEquals(2, identities.size());
+    Assert.assertEquals("/SERVICE1/service1_spnego", identities.get(0).getPath());
+    Assert.assertEquals("/SERVICE1/service1_identity", identities.get(1).getPath());
+  }
+
+  @Test
+  public void testGetReferences() throws Exception {
+    KerberosDescriptor kerberosDescriptor = KERBEROS_DESCRIPTOR_FACTORY.createInstance(JSON_VALUE_IDENTITY_REFERENCES);
+    KerberosIdentityDescriptor identity;
+    List<KerberosIdentityDescriptor> references;
+    Set<String> paths;
+
+    // Find all references to /spnego
+    identity = kerberosDescriptor.getIdentity("spnego");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(2, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE1/service1_spnego"));
+    Assert.assertTrue(paths.contains("/SERVICE2//spnego"));
+
+    // Find all references to /SERVICE1/service1_identity
+    identity = kerberosDescriptor.getService("SERVICE1").getIdentity("service1_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service1_identity"));
+
+    // Find all references to /SERVICE2/COMPONENT21/component1_identity (testing ./)
+    identity = kerberosDescriptor.getService("SERVICE2").getComponent("COMPONENT21").getIdentity("component1_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_component1_identity"));
+
+    // Find all references to /SERVICE2/component2_identity (testing ../)
+    identity = kerberosDescriptor.getService("SERVICE2").getIdentity("service2_identity");
+    references = identity.findReferences();
+
+    Assert.assertNotNull(references);
+    Assert.assertEquals(1, references.size());
+
+    paths = collectPaths(references);
+    Assert.assertTrue(paths.contains("/SERVICE2/COMPONENT21/service2_component1_service2_identity"));
+  }
+
+  private Set<String> collectPaths(List<KerberosIdentityDescriptor> identityDescriptors) {
+    Set<String> paths = new HashSet<>();
+    for (KerberosIdentityDescriptor identityDescriptor : identityDescriptors) {
+      paths.add(identityDescriptor.getPath());
+    }
+    return paths;
   }
+
 }

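The reference strings exercised above come in three forms: absolute ("/SERVICE1/service1_identity"), relative to the declaring container ("./component1_identity"), and relative to the parent container ("../service2_identity"); findReferences() walks the descriptor and returns every identity whose reference resolves to the receiver's path. A rough resolution sketch, with the helper shape (and the bare-name fallback) being illustrative only:

    // Resolve an identity reference to an absolute descriptor path.
    // containerPath is the path of the declaring service/component,
    // e.g. "/SERVICE2/COMPONENT21".
    static String resolveReference(String reference, String containerPath) {
      if (reference.startsWith("/")) {
        return reference;                                       // absolute path
      } else if (reference.startsWith("./")) {
        return containerPath + "/" + reference.substring(2);    // same container
      } else if (reference.startsWith("../")) {
        String parent = containerPath.substring(0, containerPath.lastIndexOf('/'));
        return parent + "/" + reference.substring(3);           // parent container
      }
      return "/" + reference;  // bare name: assumed to name a root-level identity
    }
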
+ 2 - 0
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -95,8 +95,10 @@ class TestHBaseMaster(RMFTestCase):
                          try_install=True,
                          os_type=('Redhat', '6.4', 'Final'),
                          checked_call_mocks = [(0, "OK.", "")],
+                         available_packages_in_repos = ['hbase_2_3_0_1_1234'],
                          )
 
+
       # only assert that the correct package is trying to be installed
       self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
                                 retry_count=5,

+ 17 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py

@@ -666,3 +666,20 @@ class TestDatanode(RMFTestCase):
     self.assertEquals(
       ('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
       mocks_dict['checked_call'].call_args_list[0][0][0])
+
+  def test_reload_configs(self):
+      with self.assertRaises(Fail):
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                             classname = "DataNode",
+                             command = "reload_configs",
+                             config_file = "default.json",
+                             stack_version = self.STACK_VERSION,
+                             target = RMFTestCase.TARGET_COMMON_SERVICES
+                             )
+
+          # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+          #                       tries=115,
+          #                       try_sleep=10,
+          #                       user="hdfs",
+          #                       logoutput=True
+          #                       )

+ 33 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -1745,6 +1745,39 @@ class TestNamenode(RMFTestCase):
     get_namenode_states_mock.return_value = active_namenodes, standby_namenodes, unknown_namenodes
     self.assertFalse(is_this_namenode_active())
 
+  def test_reloadproxyusers(self):
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                         classname = "NameNode",
+                         command = "reloadproxyusers",
+                         config_file = "default.json",
+                         stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+                         )
+
+      self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshSuperUserGroupsConfiguration',
+                                user = 'hdfs',
+                                conf_dir = '/etc/hadoop/conf',
+                                bin_dir = '/usr/bin')
+      self.assertNoMoreResources()
+
+  def test_reload_configs(self):
+      with self.assertRaises(Fail):
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                             classname = "NameNode",
+                             command = "reload_configs",
+                             config_file = "default.json",
+                             stack_version = self.STACK_VERSION,
+                             target = RMFTestCase.TARGET_COMMON_SERVICES
+                             )
+
+      # self.assertResourceCalled('Execute', "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -reconfig namenode c6401.ambari.apache.org:8020 start",
+      #                       tries=115,
+      #                       try_sleep=10,
+      #                       user="hdfs",
+      #                       logoutput=True
+      #                       )
+
+
 
 class Popen_Mock:
   return_value = 1

+ 24 - 77
ambari-server/src/test/python/stacks/2.6/ZEPPELIN/test_zeppelin_070.py

@@ -305,67 +305,32 @@ class TestZeppelin070(RMFTestCase):
                               security_enabled=False,
                               )
 
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
-                              content=interpreter_json_generated.template_after_base,
+                          content=interpreter_json_generated.template_after_base,
+                          owner='zeppelin',
+                          group='zeppelin',
+                          )
+
+    self.assertResourceCalled('HdfsResource',
+                              '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
+                              security_enabled=False,
+                              hadoop_bin_dir='/usr/hdp/2.5.0.0-1235/hadoop/bin',
+                              keytab=UnknownConfigurationMock(),
+                              source='/etc/zeppelin/conf/interpreter.json',
+                              default_fs='hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hdfs_site={u'a': u'b'},
+                              kinit_path_local='/usr/bin/kinit',
+                              principal_name=UnknownConfigurationMock(),
+                              user='zeppelin',
                               owner='zeppelin',
                               group='zeppelin',
+                              replace_existing_files=True,
+                              hadoop_conf_dir='/usr/hdp/2.5.0.0-1235/hadoop/conf',
+                              type='file',
+                              action=['create_on_execute'],
                               )
 
-    self.assertResourceCalled('HdfsResource', '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/etc/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        replace_existing_files = True,
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['create_on_execute'],
-    )
-
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
                               content=interpreter_json_generated.template_after_without_spark_and_livy,
                               owner='zeppelin',
@@ -383,7 +348,7 @@ class TestZeppelin070(RMFTestCase):
         hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
+        user = 'zeppelin',
         owner = 'zeppelin',
         group = 'zeppelin',
         hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
@@ -391,24 +356,6 @@ class TestZeppelin070(RMFTestCase):
         action = ['create_on_execute'],
     )
 
-    self.assertResourceCalled('HdfsResource', '/etc/zeppelin/conf/interpreter.json',
-        security_enabled = False,
-        hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
-        keytab = UnknownConfigurationMock(),
-        source = '/user/zeppelin/hdfs:///user/zeppelin/conf/interpreter.json',
-        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-        hdfs_site = {u'a': u'b'},
-        kinit_path_local = '/usr/bin/kinit',
-        principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
-        owner = 'zeppelin',
-        group = 'zeppelin',
-        hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
-        type = 'file',
-        action = ['download_on_execute'],
-    )
-
     self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
                               content=interpreter_json_generated.template_after_kerberos,
                               owner='zeppelin',
@@ -421,12 +368,12 @@ class TestZeppelin070(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         source = '/etc/zeppelin/conf/interpreter.json',
         default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-        replace_existing_files = True,
         hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
-        user = 'hdfs',
+        replace_existing_files = True,
+        user = 'zeppelin',
         owner = 'zeppelin',
         group = 'zeppelin',
         hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',

+ 3 - 1
ambari-server/src/test/python/stacks/utils/RMFTestCase.py

@@ -80,7 +80,8 @@ class RMFTestCase(TestCase):
                     mocks_dict={},
                     try_install=False,
                     command_args=[],
-                    log_out_files=False):
+                    log_out_files=False,
+                    available_packages_in_repos = []):
 
     norm_path = os.path.normpath(path)
 
@@ -125,6 +126,7 @@ class RMFTestCase(TestCase):
         Script.instance = None
         script_class_inst = RMFTestCase._get_attr(script_module, classname)()
         script_class_inst.log_out_files = log_out_files
+        script_class_inst.available_packages_in_repos = available_packages_in_repos
         method = RMFTestCase._get_attr(script_class_inst, command)
     except IOError, err:
       raise RuntimeError("Cannot load class %s from %s: %s" % (classname, norm_path, err.message))

+ 1 - 1
ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml

@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

+ 1 - 1
ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml

@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

+ 8 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml

@@ -444,4 +444,12 @@ don't exist, they will be created with this permission.</description>
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>hadoop.caller.context.enabled</name>
+    <value>true</value>
+    <on-ambari-upgrade add="false"/>
+    <supported-refresh-commands>
+      <refresh-command componentName="NAMENODE" command="reload_configs" />
+    </supported-refresh-commands>
+  </property>
 </configuration>

+ 22 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <upgrade>0.3</upgrade>
+    </versions>
+</metainfo>

+ 63 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml

@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 145 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml

@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This value should be 1/8 of the maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

+ 223 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml

@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This value should be 1/8 of the maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

+ 137 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers.
+    The same property is used by the HMaster for the count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

+ 199 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml

@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>
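
For context, a minimal log4j 1.x sketch of how daemon-side code would emit to the SecurityLogger category wired up above. The class name and messages are illustrative; the "SecurityLogger." name prefix is the convention these log4j.category entries rely on:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

// Minimal sketch (illustrative class and messages): emitting to the
// security-audit category configured by log4j.category.SecurityLogger above.
public class SecurityAuditLogDemo {
  public static void main(String[] args) {
    // Prefixing the class name with "SecurityLogger." makes log4j resolve
    // this logger under the SecurityLogger category.
    Logger auditLog = Logger.getLogger("SecurityLogger." + SecurityAuditLogDemo.class.getName());

    // With the default hadoop.security.logger=INFO,console these lines go to
    // the console; pointing that property at DRFAS instead routes them to
    // ${hadoop.log.dir}/SecurityAuth.audit with daily rollover.
    auditLog.info("Auth successful for hdfs (auth:SIMPLE)");
    auditLog.log(Level.WARN, "Auth failed for guest: no matching credentials");
  }
}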

+ 396 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml

@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable HDFS append.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose, in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and the base port on which the DFS NameNode
+    web UI listens.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+      Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>The number of server threads for the namenode, raised to
+      grow the call queue so that more client connections are allowed.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be set on dfs.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL controlling who can view the default servlets in HDFS.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description>Number of reader threads for the IPC server.</description>
+  </property>
+
+</configuration>
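
For reference, a minimal sketch of reading this test resource through Hadoop's Configuration API; the local file path and the fallback defaults passed to the getters are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

// Minimal sketch (hadoop-common on the classpath; the path below is a
// hypothetical local copy of the hdfs-site.xml shown above).
public class HdfsSiteDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);       // skip built-in defaults
    conf.addResource(new Path("/tmp/hdfs-site.xml"));    // illustrative path

    // Typed lookups; the second argument is the fallback when the key is absent.
    int handlers = conf.getInt("dfs.namenode.handler.count", 10);    // 100 from the file
    boolean webhdfs = conf.getBoolean("dfs.webhdfs.enabled", true);  // false from the file
    String nameDir = conf.get("dfs.name.dir");

    // Keys marked <final>true</final> above (e.g. dfs.name.dir) cannot be
    // overridden by configuration resources loaded after this one.
    System.out.printf("handlers=%d webhdfs=%b nameDir=%s%n", handlers, webhdfs, nameDir);
  }
}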

+ 30 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

+ 20 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py

@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 23 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 2 - 0
ambari-web/app/assets/test/tests.js

@@ -125,6 +125,7 @@ var files = [
   'test/controllers/main/service/item_test',
   'test/controllers/main/service/info/config_test',
   'test/controllers/main/service/info/summary_test',
+  'test/controllers/main/service/info/metric_test',
   'test/controllers/main/service_test',
   'test/controllers/main/admin_test',
   'test/controllers/main/views_controller_test',
@@ -340,6 +341,7 @@ var files = [
   'test/views/main/service/service_test',
   'test/views/main/service/info/config_test',
   'test/views/main/service/info/summary_test',
+  'test/views/main/service/info/metrics_view_test',
   'test/views/main/service/info/menu_test',
   'test/views/main/service/info/component_list_view_test',
   'test/views/main/service/info/metrics/ambari_metrics/regionserver_base_test',

+ 1 - 0
ambari-web/app/controllers.js

@@ -142,6 +142,7 @@ require('controllers/main/charts');
 require('controllers/main/charts/heatmap_metrics/heatmap_metric');
 require('controllers/main/charts/heatmap');
 require('controllers/main/service/info/heatmap');
+require('controllers/main/service/info/metric');
 require('controllers/main/views_controller');
 require('controllers/main/views/details_controller');
 require('controllers/wizard/step0_controller');

+ 468 - 0
ambari-web/app/controllers/main/service/info/metric.js

@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+var App = require('app');
+
+App.MainServiceInfoMetricsController = Em.Controller.extend(App.WidgetSectionMixin, {
+  name: 'mainServiceInfoMetricsController',
+
+  layoutNameSuffix: "_dashboard",
+
+  sectionNameSuffix: "_SUMMARY",
+
+  /**
+   * True if at least one widget has type `GRAPH`
+   *
+   * @type {boolean}
+   */
+  someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
+
+  /**
+   * @type {boolean}
+   */
+  showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
+
+  /**
+   * @type {boolean}
+   */
+  isWidgetLayoutsLoaded: false,
+
+  /**
+   * @type {boolean}
+   */
+  isAllSharedWidgetsLoaded: false,
+
+  /**
+   * @type {boolean}
+   */
+  isMineWidgetsLoaded: false,
+
+  /**
+   * load widget layouts across all users in CLUSTER scope
+   * @returns {$.ajax}
+   */
+  loadWidgetLayouts: function () {
+    this.set('isWidgetLayoutsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.layouts.get',
+      sender: this,
+      data: {
+        sectionName: this.get('sectionName')
+      },
+      success: 'loadWidgetLayoutsSuccessCallback'
+    });
+  },
+
+  loadWidgetLayoutsSuccessCallback: function (data) {
+    App.widgetLayoutMapper.map(data);
+    this.set('isWidgetLayoutsLoaded', true);
+  },
+
+
+  /**
+   * load all shared widgets to show on widget browser
+   * @returns {$.ajax}
+   */
+  loadAllSharedWidgets: function () {
+    this.set('isAllSharedWidgetsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.all.shared.get',
+      sender: this,
+      success: 'loadAllSharedWidgetsSuccessCallback'
+    });
+  },
+
+  /**
+   * success callback of <code>loadAllSharedWidgets</code>
+   * @param {object|null} data
+   */
+  loadAllSharedWidgetsSuccessCallback: function (data) {
+    var widgetIds = this.get('widgets').mapProperty('id');
+    if (data.items[0] && data.items.length) {
+      this.set("allSharedWidgets",
+        data.items.filter(function (widget) {
+          return widget.WidgetInfo.widget_type != "HEATMAP";
+        }).map(function (widget) {
+          var widgetType = widget.WidgetInfo.widget_type;
+          var widgetName = widget.WidgetInfo.widget_name;
+          var widgetId =  widget.WidgetInfo.id;
+          return Em.Object.create({
+            id: widgetId,
+            widgetName: widgetName,
+            description: widget.WidgetInfo.description,
+            widgetType: widgetType,
+            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+            added: widgetIds.contains(widgetId),
+            isShared: widget.WidgetInfo.scope == "CLUSTER"
+          });
+        })
+      );
+    }
+    this.set('isAllSharedWidgetsLoaded', true);
+  },
+
+  allSharedWidgets: [],
+  mineWidgets: [],
+
+  /**
+   * load all of the current user's widgets to show in the widget browser
+   * @returns {$.ajax}
+   */
+  loadMineWidgets: function () {
+    this.set('isMineWidgetsLoaded', false);
+    return App.ajax.send({
+      name: 'widgets.all.mine.get',
+      sender: this,
+      data: {
+        loginName: App.router.get('loginName')
+      },
+      success: 'loadMineWidgetsSuccessCallback'
+    });
+  },
+
+  /**
+   * success callback of <code>loadMineWidgets</code>
+   * @param {object|null} data
+   */
+  loadMineWidgetsSuccessCallback: function (data) {
+    var widgetIds = this.get('widgets').mapProperty('id');
+    if (data.items[0] && data.items.length) {
+      this.set("mineWidgets",
+        data.items.filter(function (widget) {
+          return widget.WidgetInfo.widget_type != "HEATMAP";
+        }).map(function (widget) {
+          var widgetType = widget.WidgetInfo.widget_type;
+          var widgetName = widget.WidgetInfo.widget_name;
+          var widgetId =  widget.WidgetInfo.id;
+          return Em.Object.create({
+            id: widget.WidgetInfo.id,
+            widgetName: widgetName,
+            description: widget.WidgetInfo.description,
+            widgetType: widgetType,
+            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
+            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
+            added: widgetIds.contains(widgetId),
+            isShared: widget.WidgetInfo.scope == "CLUSTER"
+          });
+        })
+      );
+    } else {
+      this.set("mineWidgets", []);
+    }
+    this.set('isMineWidgetsLoaded', true);
+  },
+
+  /**
+   * add a widget; click handler for the "Add" button
+   */
+  addWidget: function (event) {
+    var widgetToAdd = event.context;
+    var activeLayout = this.get('activeWidgetLayout');
+    var widgetIds = activeLayout.get('widgets').map(function(widget) {
+      return {
+        "id": widget.get("id")
+      }
+    });
+    widgetIds.pushObject({
+      "id": widgetToAdd.id
+    });
+    var data = {
+      "WidgetLayoutInfo": {
+        "display_name": activeLayout.get("displayName"),
+        "id": activeLayout.get("id"),
+        "layout_name": activeLayout.get("layoutName"),
+        "scope": activeLayout.get("scope"),
+        "section_name": activeLayout.get("sectionName"),
+        "widgets": widgetIds
+      }
+    };
+
+    widgetToAdd.set('added', !widgetToAdd.added);
+    return App.ajax.send({
+      name: 'widget.layout.edit',
+      sender: this,
+      data: {
+        layoutId: activeLayout.get("id"),
+        data: data
+      },
+      success: 'updateActiveLayout'
+    });
+  },
+
+  /**
+   * hide a widget; click handler for the "Added" toggle
+   */
+  hideWidget: function (event) {
+    var widgetToHide = event.context;
+    var activeLayout = this.get('activeWidgetLayout');
+    var widgetIds = activeLayout.get('widgets').map(function (widget) {
+      return {
+        "id": widget.get("id")
+      }
+    });
+    var data = {
+      "WidgetLayoutInfo": {
+        "display_name": activeLayout.get("displayName"),
+        "id": activeLayout.get("id"),
+        "layout_name": activeLayout.get("layoutName"),
+        "scope": activeLayout.get("scope"),
+        "section_name": activeLayout.get("sectionName"),
+        "widgets": widgetIds.filter(function (widget) {
+          return widget.id !== widgetToHide.id;
+        })
+      }
+    };
+
+    widgetToHide.set('added', !widgetToHide.added);
+    return App.ajax.send({
+      name: 'widget.layout.edit',
+      sender: this,
+      data: {
+        layoutId: activeLayout.get("id"),
+        data: data
+      },
+      success: 'hideWidgetSuccessCallback'
+    });
+
+  },
+
+  /**
+   * @param {object|null} data
+   * @param {object} opt
+   * @param {object} params
+   */
+  hideWidgetSuccessCallback: function (data, opt, params) {
+    params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
+      return {
+        WidgetInfo: {
+          id: widget.id
+        }
+      }
+    });
+    App.widgetLayoutMapper.map({items: [params.data]});
+    this.propertyDidChange('widgets');
+  },
+
+  /**
+   * update current active widget layout
+   */
+  updateActiveLayout: function () {
+    this.getActiveWidgetLayout();
+  },
+
+  /**
+   * delete a widget; click handler for the "Delete" button
+   */
+  deleteWidget: function (event) {
+    var widget = event.context;
+    var self = this;
+    var confirmMsg =  widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) :  Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
+    var bodyMessage = Em.Object.create({
+      confirmMsg: confirmMsg,
+      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
+    });
+    return App.showConfirmationFeedBackPopup(function (query) {
+      return App.ajax.send({
+        name: 'widget.action.delete',
+        sender: self,
+        data: {
+          id: widget.id
+        },
+        success: 'updateWidgetBrowser'
+      });
+
+    }, bodyMessage);
+  },
+
+  /**
+   * update widget browser content after a widget has been deleted
+   */
+  updateWidgetBrowser: function () {
+    this.loadAllSharedWidgets();
+    this.loadMineWidgets();
+  },
+
+  /**
+   * share a widget; click handler for the "Share" button
+   */
+  shareWidget: function (event) {
+    var widget = event.context;
+    var self = this;
+    var bodyMessage = Em.Object.create({
+      confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
+      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
+    });
+    return App.showConfirmationFeedBackPopup(function (query) {
+      return App.ajax.send({
+        name: 'widgets.wizard.edit',
+        sender: self,
+        data: {
+          data: {
+            "WidgetInfo": {
+              "widget_name": widget.get("widgetName"),
+              "scope": "CLUSTER"
+            }
+          },
+          widgetId: widget.get("id")
+        },
+        success: 'updateWidgetBrowser'
+      });
+    }, bodyMessage);
+  },
+
+  /**
+   * create widget
+   */
+  createWidget: function () {
+    App.router.send('createServiceWidget', Em.Object.create({
+      layout: this.get('activeWidgetLayout'),
+      serviceName: this.get('content.serviceName')
+    }));
+  },
+
+  /**
+   * edit widget
+   * @param {App.Widget} content
+   */
+  editWidget: function (content) {
+    content.set('serviceName', this.get('content.serviceName'));
+    App.router.send('editServiceWidget', content);
+  },
+
+  /**
+   * launch Widgets Browser popup
+   * @method goToWidgetsBrowser
+   * @return {App.ModalPopup}
+   */
+  goToWidgetsBrowser: function () {
+    var self = this;
+
+    return App.ModalPopup.show({
+      header: Em.I18n.t('dashboard.widgets.browser.header'),
+
+      classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
+      modalDialogClasses: ['modal-lg'],
+      onPrimary: function () {
+        this.hide();
+        self.set('isAllSharedWidgetsLoaded', false);
+        self.set('allSharedWidgets', []);
+        self.set('isMineWidgetsLoaded', false);
+        self.set('mineWidgets', []);
+      },
+      autoHeight: false,
+      isHideBodyScroll: false,
+      footerClass: Ember.View.extend({
+        templateName: require('templates/common/modal_popups/widget_browser_footer'),
+        isShowMineOnly: false,
+        onPrimary: function() {
+          this.get('parentView').onPrimary();
+        }
+      }),
+      isShowMineOnly: false,
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/common/modal_popups/widget_browser_popup'),
+        controller: self,
+        willInsertElement: function () {
+          this.get('controller').loadAllSharedWidgets();
+          this.get('controller').loadMineWidgets();
+        },
+
+        isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
+
+        isWidgetEmptyList: Em.computed.empty('filteredContent'),
+
+        activeService: '',
+        activeStatus: '',
+
+        content: function () {
+          if (this.get('parentView.isShowMineOnly')) {
+            return this.get('controller.mineWidgets');
+          } else {
+            // merge my widgets and all shared widgets; no duplicates allowed
+            var content = [];
+            var widgetMap = {};
+            var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
+            allWidgets.forEach(function(widget) {
+              if (!widgetMap[widget.get("id")]) {
+                content.pushObject(widget);
+                widgetMap[widget.get("id")] = true;
+              }
+            });
+            return content;
+          }
+        }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
+          'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
+
+        /**
+         * Content to display, filtered by service name and status.
+         */
+        filteredContent: function () {
+          var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
+          var result = [];
+          this.get('content').forEach(function (widget) {
+            if (widget.get('serviceName').indexOf(activeService) >= 0) {
+              result.pushObject(widget);
+            }
+          });
+          return result;
+        }.property('content', 'activeService', 'activeStatus'),
+
+        /**
+         * service name filter
+         */
+        services: function () {
+          var view = this;
+          var services = App.Service.find().filter(function(item){
+            var stackService =  App.StackService.find().findProperty('serviceName', item.get('serviceName'));
+            return stackService.get('isServiceWithWidgets');
+          });
+          return services.map(function (service) {
+            return Em.Object.create({
+              value: service.get('serviceName'),
+              label: service.get('displayName'),
+              isActive: function () {
+                var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
+                return this.get('value') == activeService;
+              }.property('value', 'view.activeService')
+            })
+          });
+        }.property('activeService'),
+
+        filterByService: function (event) {
+          this.set('activeService', event.context);
+        },
+
+        createWidget: function () {
+          this.get('parentView').onPrimary();
+          this.get('controller').createWidget();
+        },
+
+        ensureTooltip: function () {
+          Em.run.later(this, function () {
+            App.tooltip($("[rel='shared-icon-tooltip']"));
+          }, 1000);
+        }.observes('activeService', 'parentView.isShowMineOnly'),
+
+        didInsertElement: function () {
+          this.ensureTooltip();
+        }
+      })
+    });
+  }
+
+});

+ 1 - 448
ambari-web/app/controllers/main/service/info/summary.js

@@ -17,7 +17,7 @@
 
 var App = require('app');
 
-App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMixin, {
+App.MainServiceInfoSummaryController = Em.Controller.extend({
   name: 'mainServiceInfoSummaryController',
 
   selectedFlumeAgent: null,
@@ -40,10 +40,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
    */
   isPreviousRangerConfigsCallFailed: false,
 
-  layoutNameSuffix: "_dashboard",
-
-  sectionNameSuffix: "_SUMMARY",
-
   /**
    * HiveServer2 JDBC connection endpoint data
    * @type {array}
@@ -111,18 +107,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
     }
   ],
 
-  /**
-   * Some widget has type `GRAPH`
-   *
-   * @type {boolean}
-   */
-  someWidgetGraphExists: Em.computed.someBy('widgets', 'widgetType', 'GRAPH'),
-
-  /**
-   * @type {boolean}
-   */
-  showTimeRangeControl: Em.computed.or('!isServiceWithEnhancedWidgets', 'someWidgetGraphExists'),
-
   /**
    * Set initial Ranger plugins data
    * @method setRangerPlugins
@@ -425,437 +409,6 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
     });
   },
 
-
-  /**
-   * @type {boolean}
-   */
-  isWidgetLayoutsLoaded: false,
-
-  /**
-   * @type {boolean}
-   */
-  isAllSharedWidgetsLoaded: false,
-
-  /**
-   * @type {boolean}
-   */
-  isMineWidgetsLoaded: false,
-
-
-  /**
-   * load widget layouts across all users in CLUSTER scope
-   * @returns {$.ajax}
-   */
-  loadWidgetLayouts: function () {
-    this.set('isWidgetLayoutsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.layouts.get',
-      sender: this,
-      data: {
-        sectionName: this.get('sectionName')
-      },
-      success: 'loadWidgetLayoutsSuccessCallback'
-    });
-  },
-
-  loadWidgetLayoutsSuccessCallback: function (data) {
-    App.widgetLayoutMapper.map(data);
-    this.set('isWidgetLayoutsLoaded', true);
-  },
-
-
-  /**
-   * load all shared widgets to show on widget browser
-   * @returns {$.ajax}
-   */
-  loadAllSharedWidgets: function () {
-    this.set('isAllSharedWidgetsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.all.shared.get',
-      sender: this,
-      success: 'loadAllSharedWidgetsSuccessCallback'
-    });
-  },
-
-  /**
-   * success callback of <code>loadAllSharedWidgets</code>
-   * @param {object|null} data
-   */
-  loadAllSharedWidgetsSuccessCallback: function (data) {
-    var widgetIds = this.get('widgets').mapProperty('id');
-    if (data.items[0] && data.items.length) {
-      this.set("allSharedWidgets",
-        data.items.filter(function (widget) {
-          return widget.WidgetInfo.widget_type != "HEATMAP";
-        }).map(function (widget) {
-          var widgetType = widget.WidgetInfo.widget_type;
-          var widgetName = widget.WidgetInfo.widget_name;
-          var widgetId =  widget.WidgetInfo.id;
-          return Em.Object.create({
-            id: widgetId,
-            widgetName: widgetName,
-            description: widget.WidgetInfo.description,
-            widgetType: widgetType,
-            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
-            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
-            added: widgetIds.contains(widgetId),
-            isShared: widget.WidgetInfo.scope == "CLUSTER"
-          });
-        })
-      );
-    }
-    this.set('isAllSharedWidgetsLoaded', true);
-  },
-
-  allSharedWidgets: [],
-  mineWidgets: [],
-
-  /**
-   * load all mine widgets of current user to show on widget browser
-   * @returns {$.ajax}
-   */
-  loadMineWidgets: function () {
-    this.set('isMineWidgetsLoaded', false);
-    return App.ajax.send({
-      name: 'widgets.all.mine.get',
-      sender: this,
-      data: {
-        loginName: App.router.get('loginName')
-      },
-      success: 'loadMineWidgetsSuccessCallback'
-    });
-  },
-
-  /**
-   * success callback of <code>loadMineWidgets</code>
-   * @param {object|null} data
-   */
-  loadMineWidgetsSuccessCallback: function (data) {
-    var widgetIds = this.get('widgets').mapProperty('id');
-    if (data.items[0] && data.items.length) {
-      this.set("mineWidgets",
-        data.items.filter(function (widget) {
-          return widget.WidgetInfo.widget_type != "HEATMAP";
-        }).map(function (widget) {
-          var widgetType = widget.WidgetInfo.widget_type;
-          var widgetName = widget.WidgetInfo.widget_name;
-          var widgetId =  widget.WidgetInfo.id;
-          return Em.Object.create({
-            id: widget.WidgetInfo.id,
-            widgetName: widgetName,
-            description: widget.WidgetInfo.description,
-            widgetType: widgetType,
-            iconPath: "/img/widget-" + widgetType.toLowerCase() + ".png",
-            serviceName: JSON.parse(widget.WidgetInfo.metrics).mapProperty('service_name').uniq().join('-'),
-            added: widgetIds.contains(widgetId),
-            isShared: widget.WidgetInfo.scope == "CLUSTER"
-          });
-        })
-      );
-    } else {
-      this.set("mineWidgets", []);
-    }
-    this.set('isMineWidgetsLoaded', true);
-  },
-
-  /**
-   * add widgets, on click handler for "Add"
-   */
-  addWidget: function (event) {
-    var widgetToAdd = event.context;
-    var activeLayout = this.get('activeWidgetLayout');
-    var widgetIds = activeLayout.get('widgets').map(function(widget) {
-      return {
-        "id": widget.get("id")
-      }
-    });
-    widgetIds.pushObject({
-      "id": widgetToAdd.id
-    });
-    var data = {
-      "WidgetLayoutInfo": {
-        "display_name": activeLayout.get("displayName"),
-        "id": activeLayout.get("id"),
-        "layout_name": activeLayout.get("layoutName"),
-        "scope": activeLayout.get("scope"),
-        "section_name": activeLayout.get("sectionName"),
-        "widgets": widgetIds
-      }
-    };
-
-    widgetToAdd.set('added', !widgetToAdd.added);
-    return App.ajax.send({
-      name: 'widget.layout.edit',
-      sender: this,
-      data: {
-        layoutId: activeLayout.get("id"),
-        data: data
-      },
-      success: 'updateActiveLayout'
-    });
-  },
-
-  /**
-   * hide widgets, on click handler for "Added"
-   */
-  hideWidget: function (event) {
-    var widgetToHide = event.context;
-    var activeLayout = this.get('activeWidgetLayout');
-    var widgetIds = activeLayout.get('widgets').map(function (widget) {
-      return {
-        "id": widget.get("id")
-      }
-    });
-    var data = {
-      "WidgetLayoutInfo": {
-        "display_name": activeLayout.get("displayName"),
-        "id": activeLayout.get("id"),
-        "layout_name": activeLayout.get("layoutName"),
-        "scope": activeLayout.get("scope"),
-        "section_name": activeLayout.get("sectionName"),
-        "widgets": widgetIds.filter(function (widget) {
-          return widget.id !== widgetToHide.id;
-        })
-      }
-    };
-
-    widgetToHide.set('added', !widgetToHide.added);
-    return App.ajax.send({
-      name: 'widget.layout.edit',
-      sender: this,
-      data: {
-        layoutId: activeLayout.get("id"),
-        data: data
-      },
-      success: 'hideWidgetSuccessCallback'
-    });
-
-  },
-
-  /**
-   * @param {object|null} data
-   * @param {object} opt
-   * @param {object} params
-   */
-  hideWidgetSuccessCallback: function (data, opt, params) {
-    params.data.WidgetLayoutInfo.widgets = params.data.WidgetLayoutInfo.widgets.map(function (widget) {
-      return {
-        WidgetInfo: {
-          id: widget.id
-        }
-      }
-    });
-    App.widgetLayoutMapper.map({items: [params.data]});
-    this.propertyDidChange('widgets');
-  },
-
-  /**
-   * update current active widget layout
-   */
-  updateActiveLayout: function () {
-    this.getActiveWidgetLayout();
-  },
-
-  /**
-   * delete widgets, on click handler for "Delete"
-   */
-  deleteWidget: function (event) {
-    var widget = event.context;
-    var self = this;
-    var confirmMsg =  widget.get('isShared') ? Em.I18n.t('dashboard.widgets.browser.action.delete.shared.bodyMsg').format(widget.widgetName) :  Em.I18n.t('dashboard.widgets.browser.action.delete.mine.bodyMsg').format(widget.widgetName);
-    var bodyMessage = Em.Object.create({
-      confirmMsg: confirmMsg,
-      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.delete.btnMsg')
-    });
-    return App.showConfirmationFeedBackPopup(function (query) {
-      return App.ajax.send({
-        name: 'widget.action.delete',
-        sender: self,
-        data: {
-          id: widget.id
-        },
-        success: 'updateWidgetBrowser'
-      });
-
-    }, bodyMessage);
-  },
-
-  /**
-   * update widget browser content after deleted some widget
-   */
-  updateWidgetBrowser: function () {
-    this.loadAllSharedWidgets();
-    this.loadMineWidgets();
-  },
-
-  /**
-   * Share widgets, on click handler for "Share"
-   */
-  shareWidget: function (event) {
-    var widget = event.context;
-    var self = this;
-    var bodyMessage = Em.Object.create({
-      confirmMsg: Em.I18n.t('dashboard.widgets.browser.action.share.confirmation'),
-      confirmButton: Em.I18n.t('dashboard.widgets.browser.action.share')
-    });
-    return App.showConfirmationFeedBackPopup(function (query) {
-      return App.ajax.send({
-        name: 'widgets.wizard.edit',
-        sender: self,
-        data: {
-          data: {
-            "WidgetInfo": {
-              "widget_name": widget.get("widgetName"),
-              "scope": "CLUSTER"
-            }
-          },
-          widgetId: widget.get("id")
-        },
-        success: 'updateWidgetBrowser'
-      });
-    }, bodyMessage);
-  },
-
-  /**
-   * create widget
-   */
-  createWidget: function () {
-    App.router.send('createServiceWidget', Em.Object.create({
-      layout: this.get('activeWidgetLayout'),
-      serviceName: this.get('content.serviceName')
-    }));
-  },
-
-  /**
-   * edit widget
-   * @param {App.Widget} content
-   */
-  editWidget: function (content) {
-    content.set('serviceName', this.get('content.serviceName'));
-    App.router.send('editServiceWidget', content);
-  },
-
-  /**
-   * launch Widgets Browser popup
-   * @method showPopup
-   * @return {App.ModalPopup}
-   */
-  goToWidgetsBrowser: function () {
-    var self = this;
-
-    return App.ModalPopup.show({
-      header: Em.I18n.t('dashboard.widgets.browser.header'),
-
-      classNames: ['common-modal-wrapper', 'widgets-browser-popup'],
-      modalDialogClasses: ['modal-lg'],
-      onPrimary: function () {
-        this.hide();
-        self.set('isAllSharedWidgetsLoaded', false);
-        self.set('allSharedWidgets', []);
-        self.set('isMineWidgetsLoaded', false);
-        self.set('mineWidgets', []);
-      },
-      autoHeight: false,
-      isHideBodyScroll: false,
-      footerClass: Ember.View.extend({
-        templateName: require('templates/common/modal_popups/widget_browser_footer'),
-        isShowMineOnly: false,
-        onPrimary: function() {
-          this.get('parentView').onPrimary();
-        }
-      }),
-      isShowMineOnly: false,
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/common/modal_popups/widget_browser_popup'),
-        controller: self,
-        willInsertElement: function () {
-          this.get('controller').loadAllSharedWidgets();
-          this.get('controller').loadMineWidgets();
-        },
-
-        isLoaded: Em.computed.and('controller.isAllSharedWidgetsLoaded', 'controller.isMineWidgetsLoaded'),
-
-        isWidgetEmptyList: Em.computed.empty('filteredContent'),
-
-        activeService: '',
-        activeStatus: '',
-
-        content: function () {
-          if (this.get('parentView.isShowMineOnly')) {
-            return this.get('controller.mineWidgets');
-          } else {
-            // merge my widgets and all shared widgets, no duplicated is allowed
-            var content = [];
-            var widgetMap = {};
-            var allWidgets = this.get('controller.allSharedWidgets').concat(this.get('controller.mineWidgets'));
-            allWidgets.forEach(function(widget) {
-              if (!widgetMap[widget.get("id")]) {
-                content.pushObject(widget);
-                widgetMap[widget.get("id")] = true;
-              }
-            });
-            return content;
-          }
-        }.property('controller.allSharedWidgets.length', 'controller.isAllSharedWidgetsLoaded',
-          'controller.mineWidgets.length', 'controller.isMineWidgetsLoaded', 'parentView.isShowMineOnly'),
-
-        /**
-         * displaying content filtered by service name and status.
-         */
-        filteredContent: function () {
-          var activeService = this.get('activeService') ? this.get('activeService') : this.get('controller.content.serviceName');
-          var result = [];
-          this.get('content').forEach(function (widget) {
-            if (widget.get('serviceName').indexOf(activeService) >= 0) {
-              result.pushObject(widget);
-            }
-          });
-          return result;
-        }.property('content', 'activeService', 'activeStatus'),
-
-        /**
-         * service name filter
-         */
-        services: function () {
-          var view = this;
-          var services = App.Service.find().filter(function(item){
-            var stackService =  App.StackService.find().findProperty('serviceName', item.get('serviceName'));
-            return stackService.get('isServiceWithWidgets');
-          });
-          return services.map(function (service) {
-            return Em.Object.create({
-              value: service.get('serviceName'),
-              label: service.get('displayName'),
-              isActive: function () {
-                var activeService = view.get('activeService') ? view.get('activeService') : view.get('controller.content.serviceName');
-                return this.get('value') == activeService;
-              }.property('value', 'view.activeService')
-            })
-          });
-        }.property('activeService'),
-
-        filterByService: function (event) {
-          this.set('activeService', event.context);
-        },
-
-        createWidget: function () {
-          this.get('parentView').onPrimary();
-          this.get('controller').createWidget();
-        },
-
-        ensureTooltip: function () {
-          Em.run.later(this, function () {
-            App.tooltip($("[rel='shared-icon-tooltip']"));
-          }, 1000);
-        }.observes('activeService', 'parentView.isShowMineOnly'),
-
-        didInsertElement: function () {
-          this.ensureTooltip();
-        }
-      })
-    });
-  },
-
   goToView: function(event) {
     App.router.route(event.context.get('internalAmbariUrl'));
   }

+ 1 - 1
ambari-web/app/controllers/main/service/widgets/create/wizard_controller.js

@@ -417,7 +417,7 @@ App.WidgetWizardController = App.WizardController.extend({
     var self = this;
     var successCallBack = function() {
       self.get('popup').hide();
-      App.router.transitionTo('main.services.service.summary', service);
+      App.router.transitionTo('main.services.service.metrics', service);
       App.get('router.updateController').updateAll();
     };
 

+ 1 - 0
ambari-web/app/messages.js

@@ -2199,6 +2199,7 @@ Em.I18n.translations = {
   'services.service.info.menu.summary':'Summary',
   'services.service.info.menu.configs':'Configs',
   'services.service.info.menu.heatmaps':'Heatmaps',
+  'services.service.info.menu.metrics':'Metrics',
   'services.service.info.summary.hostsRunningMonitor':'{0}/{1}',
   'services.service.info.summary.serversHostCount':'{0} more',
 

+ 1 - 0
ambari-web/app/styles/common.less

@@ -71,6 +71,7 @@
 @top-nav-menu-dropdown-border-color: #c3c3c3;
 @top-nav-menu-dropdown-bg-color: #fff;
 @top-nav-menu-dropdown-text-color: #333;
+@top-nav-menu-views-menu-color: #1491c1;
 
 @-webkit-keyframes orangePulse {
   from { background-color: @restart-indicator-color; }

+ 25 - 1
ambari-web/app/styles/enhanced_service_dashboard.less

@@ -26,6 +26,10 @@
 
   clear: both;
 
+  .service-widgets-box {
+    padding: 10px 1.1% 10px 1.1%;
+  }
+
   #add-widget-action-box {
     background-color: @add-widget-btn-color;
     width: 97%;
@@ -69,7 +73,7 @@
     width: 93%;
   }
   .span2p4 {
-    width: 22.7%;
+    width: 24.4%;
     height: 100%;
     background-color: white;
     margin: 5px 0 5px 5px;
@@ -188,6 +192,26 @@
   }
 }
 
+@media (min-width: 1200px) {
+
+  .service-metrics-block .service-widgets-box {
+    padding: 10px 1.3% 10px 1.3%;
+  }
+
+  #widget_layout .span2p4 {
+    width: 24.5%;
+    *width: 24.5%;
+  }
+}
+
+@media (min-width: 1500px) {
+
+  #widget_layout .span2p4 {
+    width: 24.6%;
+    *width: 24.6%;
+  }
+}
+
 #widget-preview {
   max-width: 200px;
   margin: auto;

+ 46 - 18
ambari-web/app/styles/theme/bootstrap-ambari.css

@@ -464,7 +464,7 @@ h2.table-title {
 .nav.nav-tabs li a {
   border-width: 0;
   border-radius: 0;
-  border-bottom: 2px solid transparent;
+  border-bottom: 3px solid transparent;
   color: #6B6C6C;
   text-transform: uppercase;
 }
@@ -488,7 +488,7 @@ h2.table-title {
 .nav-tabs-left li,
 .nav-tabs-right li {
   float: none;
-  margin-bottom: 2px;
+  margin-bottom: 3px;
 }
 .nav-tabs-left li a,
 .nav-tabs-right li a {
@@ -498,25 +498,25 @@ h2.table-title {
   margin-right: -1px;
 }
 .nav-tabs-left li a {
-  border: 2px solid transparent !important;
+  border: 3px solid transparent !important;
 }
 .nav-tabs-left li.active a,
 .nav-tabs-left li.active a:hover,
 .nav-tabs-left li.active a:active,
 .nav-tabs-left li.active a:focus {
-  border-right: 2px solid #3FAE2A !important;
+  border-right: 3px solid #3FAE2A !important;
 }
 .nav-tabs-right li {
   margin-left: -1px;
 }
 .nav-tabs-right li a {
-  border: 2px solid transparent !important;
+  border: 3px solid transparent !important;
 }
 .nav-tabs-right li.active a,
 .nav-tabs-right li.active a:hover,
 .nav-tabs-right li.active a:active,
 .nav-tabs-right li.active a:focus {
-  border-left: 2px solid #3FAE2A !important;
+  border-left: 3px solid #3FAE2A !important;
 }
 .wizard {
   border: 2px solid #ebecf1;
@@ -797,8 +797,7 @@ input.radio:checked + label:after {
   cursor: pointer;
   margin-top: 3px;
 }
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header,
-.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.toggle-icon {
+.navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group:hover span.ambari-header {
   color: #fff;
 }
 .navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group span.ambari-header {
@@ -890,7 +889,7 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a .navigation-icon {
   line-height: 18px;
-  font-size: 14px;
+  font-size: 16px;
   color: #b8bec4;
 }
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a .toggle-icon,
@@ -904,12 +903,14 @@ input.radio:checked + label:after {
   color: #b8bec4;
   padding: 3px 5px 3px 10px;
 }
-.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
-.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a,
 .navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a,
 .navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a {
   padding: 10px 5px 10px 20px;
 }
+.navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a,
+.navigation-bar-container ul.nav.side-nav-footer li.navigation-footer > a {
+  padding: 14px 5px 14px 20px;
+}
 .navigation-bar-container ul.nav.side-nav-menu li.submenu-li > a,
 .navigation-bar-container ul.nav.side-nav-footer li.submenu-li > a {
   padding: 10px 5px 10px 25px;
@@ -922,7 +923,7 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
   color: #3fae2a;
-  font-size: 20px;
+  font-size: 19px;
   position: relative;
   padding: 0 15px;
   left: calc(30%);
@@ -1021,7 +1022,7 @@ input.radio:checked + label:after {
   position: absolute;
   pointer-events: none;
   border-color: transparent;
-  border-left-color: #31823a;
+  border-left-color: #3fae2a;
   margin-top: -12px;
 }
 .navigation-bar-container ul.nav.side-nav-menu .more-actions,
@@ -1097,6 +1098,10 @@ input.radio:checked + label:after {
 .navigation-bar-container.collapsed ul.nav.side-nav-footer li a .toggle-icon {
   display: none;
 }
+.navigation-bar-container.collapsed ul.nav.side-nav-menu li a .navigation-icon,
+.navigation-bar-container.collapsed ul.nav.side-nav-footer li a .navigation-icon {
+  font-size: 19px;
+}
 .navigation-bar-container.collapsed ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
 .navigation-bar-container.collapsed ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
   padding: 0 5px;
@@ -1147,7 +1152,7 @@ input.radio:checked + label:after {
   position: absolute;
   pointer-events: none;
   border-color: transparent;
-  border-left-color: #31823a;
+  border-left-color: #3fae2a;
   margin-top: -12px;
 }
 .navigation-bar-container.collapsed ul.nav.side-nav-menu .more-actions,
@@ -1189,8 +1194,10 @@ input.radio:checked + label:after {
   position: relative;
   top: 1px;
 }
+.notifications-dropdown,
 #notifications-dropdown.dropdown-menu {
-  width: 400px;
+  min-width: 400px;
+  max-width: 400px;
   min-height: 150px;
   padding: 0px;
   z-index: 1000;
@@ -1202,6 +1209,7 @@ input.radio:checked + label:after {
   -moz-box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
   box-shadow: 0px 2px 10px 2px rgba(0, 0, 0, 0.29);
 }
+.notifications-dropdown .popup-arrow-up,
 #notifications-dropdown.dropdown-menu .popup-arrow-up {
   position: absolute;
   right: 37px;
@@ -1210,6 +1218,7 @@ input.radio:checked + label:after {
   height: 40px;
   overflow: hidden;
 }
+.notifications-dropdown .popup-arrow-up:after,
 #notifications-dropdown.dropdown-menu .popup-arrow-up:after {
   content: "";
   position: absolute;
@@ -1221,10 +1230,12 @@ input.radio:checked + label:after {
   left: 10px;
   box-shadow: -1px -1px 10px -2px rgba(0, 0, 0, 0.5);
 }
+.notifications-dropdown .notifications-header,
 #notifications-dropdown.dropdown-menu .notifications-header {
   border-bottom: 1px solid #eee;
   padding: 15px 20px;
 }
+.notifications-dropdown .notifications-header .notifications-title,
 #notifications-dropdown.dropdown-menu .notifications-header .notifications-title {
   font-family: 'Roboto', sans-serif;
   font-weight: normal;
@@ -1233,19 +1244,23 @@ input.radio:checked + label:after {
   color: #333;
   font-size: 16px;
 }
+.notifications-dropdown .notifications-body,
 #notifications-dropdown.dropdown-menu .notifications-body {
   padding: 0px 15px;
   overflow: auto;
   max-height: 500px;
 }
+.notifications-dropdown .notifications-body .no-alert-text,
 #notifications-dropdown.dropdown-menu .notifications-body .no-alert-text {
   padding: 15px 5px;
 }
+.notifications-dropdown .notifications-body .table-controls,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls {
   padding: 10px 0px;
   margin: 0px;
   border-bottom: 1px solid #eee;
 }
+.notifications-dropdown .notifications-body .table-controls .state-filter,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter {
   padding: 0px;
   font-family: 'Roboto', sans-serif;
@@ -1257,36 +1272,45 @@ input.radio:checked + label:after {
   color: #666;
   position: relative;
 }
+.notifications-dropdown .notifications-body .table-controls .state-filter .form-control.filter-select,
 #notifications-dropdown.dropdown-menu .notifications-body .table-controls .state-filter .form-control.filter-select {
   font-size: 12px;
   color: #666;
   height: 25px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table {
   margin-top: 0px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr {
   cursor: pointer;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover {
   cursor: default;
   border-color: transparent;
   border-bottom-color: #eee;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody tr.no-alert-tr:hover > td {
   border-color: transparent;
   background-color: white;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status {
   width: 9%;
   padding: 15px 3px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-CRITICAL {
   color: #EF6162;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.status .alert-state-WARNING {
   color: #E98A40;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content {
   width: 90%;
   padding: 15px 3px 10px 3px;
@@ -1297,12 +1321,14 @@ input.radio:checked + label:after {
   color: #333;
   line-height: 1.3;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .name,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .name {
   font-weight: bold;
   font-size: 14px;
   color: #333;
   margin-bottom: 5px;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .description,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .description {
   font-size: 12px;
   color: #666;
@@ -1327,11 +1353,13 @@ input.radio:checked + label:after {
   -webkit-hyphens: auto;
   hyphens: auto;
 }
+.notifications-dropdown .notifications-body .table.alerts-table tbody td.content .timestamp,
 #notifications-dropdown.dropdown-menu .notifications-body .table.alerts-table tbody td.content .timestamp {
   text-align: right;
   font-size: 11px;
   color: #999;
 }
+.notifications-dropdown .notifications-footer,
 #notifications-dropdown.dropdown-menu .notifications-footer {
   border-top: 1px solid #eee;
   padding: 15px;
@@ -1372,13 +1400,13 @@ input.radio:checked + label:after {
 }
 .accordion .panel-group,
 .wizard .wizard-body .wizard-content .accordion .panel-group {
-  margin-bottom: 0px;
+  margin-bottom: 0;
 }
 .accordion .panel-group .panel,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel {
-  border-radius: 0px;
+  border-radius: 0;
   border: none;
-  margin-top: 0px;
+  margin-top: 0;
   padding: 0 10px;
 }
 .accordion .panel-group .panel .panel-heading,

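The recurring change in the compiled stylesheet above adds a class selector, .notifications-dropdown, alongside every existing #notifications-dropdown.dropdown-menu rule, so the notification-popup styling can be reused by any element that carries the class rather than the ID (the views menu added in top-nav.less below relies on this). In LESS source, this kind of parallel selector list typically comes from a single comma-joined group; a minimal sketch of the pattern, not the actual Ambari source:

// Nesting under a comma-joined group makes LESS emit the cross product,
// which matches the duplicated selector pairs in the compiled diff above.
.notifications-dropdown,
#notifications-dropdown.dropdown-menu {
  min-width: 400px;
  max-width: 400px;

  .notifications-header {
    // compiles to:
    //   .notifications-dropdown .notifications-header,
    //   #notifications-dropdown.dropdown-menu .notifications-header
    border-bottom: 1px solid #eee;
    padding: 15px 20px;
  }
}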
+ 22 - 0
ambari-web/app/styles/top-nav.less

@@ -109,6 +109,28 @@
     .top-nav-user {
       margin-top: 2px;
     }
+    .ambari-views {
+      margin-top: 17px;
+      padding: 0 20px 0 10px;
+      .notifications-dropdown.dropdown-menu {
+        right: -28px;
+        min-width: 200px;
+        max-width: 300px;
+        min-height: 100px;
+        li {
+          padding: 2px 5px;
+          a {
+            font-size: 12px;
+            color: @top-nav-menu-views-menu-color;
+          }
+        }
+      }
+      i {
+        font-size: 20px;
+        color: @top-nav-menu-views-menu-color;
+        cursor: pointer;
+      }
+    }
   }
 
   #notifications-dropdown.dropdown-menu {

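The new .ambari-views block styles a views-menu trigger icon in the top navigation and reuses the shared .notifications-dropdown class for its popup, narrowing it from the global 400px width to 200-300px. An annotated sketch of the override relationship follows; the enclosing parent selectors and the @top-nav-menu-views-menu-color variable are defined elsewhere in the app's LESS and are only assumed here:

// Illustrative only; the nesting mirrors the hunk above.
@top-nav-menu-views-menu-color: #fff; // hypothetical stand-in value

.ambari-views {
  i {
    // icon that toggles the views menu
    font-size: 20px;
    color: @top-nav-menu-views-menu-color;
    cursor: pointer;
  }
  .notifications-dropdown.dropdown-menu {
    // the extra .ambari-views ancestor makes this selector more specific
    // than the bare .notifications-dropdown rule, so the narrower
    // 200-300px width wins for the views popup.
    min-width: 200px;
    max-width: 300px;
  }
}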
Some files were not shown because of the large number of changed files