
Merge branch 'trunk' into branch-dev-patch-upgrade

Nate Cole committed 9 years ago
parent · commit 418745d129

100 changed files with 2224 additions and 457 deletions
  1. +41 -22  ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
  2. +12 -0  ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
  3. +2 -11  ambari-funtest/pom.xml
  4. +22 -0  ambari-funtest/src/main/assemblies/empty.xml
  5. +0 -79  ambari-funtest/src/main/assemblies/funtest.xml
  6. +20 -2  ambari-metrics/ambari-metrics-grafana/README.md
  7. +40 -9  ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
  8. BIN  ambari-metrics/ambari-metrics-grafana/screenshots/21-multi-templating.png
  9. +3 -3  ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
  10. +1 -1  ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
  11. +9 -0  ambari-server/conf/unix/log4j.properties
  12. +9 -0  ambari-server/conf/windows/log4j.properties
  13. +9 -0  ambari-server/src/main/conf/log4j.properties
  14. +1 -0  ambari-server/src/main/java/org/apache/ambari/server/api/resources/GroupResourceDefinition.java
  15. +4 -0  ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
  16. +76 -0  ambari-server/src/main/java/org/apache/ambari/server/api/services/GroupPrivilegeService.java
  17. +11 -0  ambari-server/src/main/java/org/apache/ambari/server/api/services/GroupService.java
  18. +104 -51  ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java
  19. +2 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
  20. +237 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/GroupPrivilegeResourceProvider.java
  21. +2 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java
  22. +66 -10  ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
  23. +3 -4  ambari-server/src/main/python/ambari_server/checkDatabase.py
  24. +1 -0  ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
  25. +1 -0  ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
  26. +1 -0  ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
  27. +2 -2  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-check-env.xml
  28. +56 -69  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/kerberos.json
  29. +12 -4  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
  30. +13 -50  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
  31. +1 -1  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
  32. +1 -1  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py
  33. +1 -1  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
  34. +8 -2  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
  35. +1 -1  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
  36. +14 -7  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py
  37. +3 -7  ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
  38. +1 -0  ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
  39. +1 -0  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  40. +1 -0  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
  41. +1 -0  ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
  42. +1 -0  ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
  43. +1 -0  ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
  44. +1 -0  ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
  45. +1 -0  ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
  46. +1 -0  ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
  47. +1 -0  ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
  48. +1 -0  ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
  49. +1 -0  ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
  50. +1 -0  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
  51. +1 -1  ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
  52. +47 -7  ambari-server/src/main/resources/scripts/Ambaripreupload.py
  53. +1 -0  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  54. +1 -2  ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
  55. +1 -2  ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml
  56. +2 -0  ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
  57. +2 -0  ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
  58. +1 -1  ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml
  59. +19 -1  ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json
  60. +3 -2  ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
  61. +1 -2  ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
  62. +4 -0  ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
  63. +3 -2  ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
  64. +25 -0  ambari-server/src/main/resources/stacks/HDP/2.5/metainfo.xml
  65. +92 -0  ambari-server/src/main/resources/stacks/HDP/2.5/repos/repoinfo.xml
  66. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/ACCUMULO/metainfo.xml
  67. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml
  68. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/FALCON/metainfo.xml
  69. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/FLUME/metainfo.xml
  70. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/metainfo.xml
  71. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/metainfo.xml
  72. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/metainfo.xml
  73. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml
  74. +25 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/KERBEROS/metainfo.xml
  75. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/metainfo.xml
  76. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/MAHOUT/metainfo.xml
  77. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/OOZIE/metainfo.xml
  78. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/PIG/metainfo.xml
  79. +29 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER/metainfo.xml
  80. +29 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/metainfo.xml
  81. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/SLIDER/metainfo.xml
  82. +29 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/metainfo.xml
  83. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/SQOOP/metainfo.xml
  84. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/metainfo.xml
  85. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/TEZ/metainfo.xml
  86. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml
  87. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/ZOOKEEPER/metainfo.xml
  88. +22 -0  ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
  89. +109 -0  ambari-server/src/test/java/org/apache/ambari/server/api/services/GroupPrivilegeServiceTest.java
  90. +17 -12  ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java
  91. +362 -0  ambari-server/src/test/java/org/apache/ambari/server/controller/internal/GroupPrivilegeResourceProviderTest.java
  92. +66 -26  ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
  93. +1 -2  ambari-server/src/test/python/TestAmbariServer.py
  94. +1 -1  ambari-server/src/test/python/TestSetupAgent.py
  95. +2 -2  ambari-server/src/test/python/custom_actions/test_ru_execute_tasks.py
  96. +3 -3  ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
  97. +9 -9  ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  98. +27 -27  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  99. +4 -4  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
  100. +14 -14  ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

+ 41 - 22
ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py

@@ -151,32 +151,13 @@ class WebHDFSUtil:
     # only hdfs seems to support webHDFS
     return (is_webhdfs_enabled and default_fs.startswith("hdfs"))
     
-  def parse_path(self, path):
-    """
-    hdfs://nn_url:1234/a/b/c -> /a/b/c
-    hdfs://nn_ha_name/a/b/c -> /a/b/c
-    hdfs:///a/b/c -> /a/b/c
-    /a/b/c -> /a/b/c
-    """
-    math_with_protocol_and_nn_url = re.match("[a-zA-Z]+://[^/]+(/.+)", path)
-    math_with_protocol = re.match("[a-zA-Z]+://(/.+)", path)
-    
-    if math_with_protocol_and_nn_url:
-      path = math_with_protocol_and_nn_url.group(1)
-    elif math_with_protocol:
-      path = math_with_protocol.group(1)
-    else:
-      path = path
-      
-    return re.sub("[/]+", "/", path)
-    
   valid_status_codes = ["200", "201"]
   def run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
     """
     assertable_result - some POST requests return '{"boolean":false}' or '{"boolean":true}'
     depending on if query was successful or not, we can assert this for them
     """
-    target = self.parse_path(target)
+    target = HdfsResourceProvider.parse_path(target)
     
     url = format("{address}/webhdfs/v1{target}?op={operation}&user.name={run_user}", address=self.address, run_user=self.run_user)
     for k,v in kwargs.iteritems():
@@ -394,7 +375,7 @@ class HdfsResourceWebHDFS:
     
     
   def _fill_in_parent_directories(self, target, results):
-    path_parts = self.util.parse_path(target).split("/")[1:]# [1:] remove '' from parts
+    path_parts = HdfsResourceProvider.parse_path(target).split("/")[1:]# [1:] remove '' from parts
     path = "/"
 
     for path_part in path_parts:
@@ -416,13 +397,51 @@ class HdfsResourceProvider(Provider):
   def __init__(self, resource):
     super(HdfsResourceProvider,self).__init__(resource)
     self.fsType = getattr(resource, 'dfs_type')
+    self.ignored_resources_list = self.get_ignored_resources_list()
     if self.fsType != 'HCFS':
       self.assert_parameter_is_set('hdfs_site')
       self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
+      
+  @staticmethod
+  def parse_path(path):
+    """
+    hdfs://nn_url:1234/a/b/c -> /a/b/c
+    hdfs://nn_ha_name/a/b/c -> /a/b/c
+    hdfs:///a/b/c -> /a/b/c
+    /a/b/c -> /a/b/c
+    """
+    math_with_protocol_and_nn_url = re.match("[a-zA-Z]+://[^/]+(/.+)", path)
+    math_with_protocol = re.match("[a-zA-Z]+://(/.+)", path)
+    
+    if math_with_protocol_and_nn_url:
+      path = math_with_protocol_and_nn_url.group(1)
+    elif math_with_protocol:
+      path = math_with_protocol.group(1)
+    else:
+      path = path
+      
+    return re.sub("[/]+", "/", path)
+  
+  def get_ignored_resources_list(self):
+    if not self.resource.hdfs_resource_ignore_file or not os.path.exists(self.resource.hdfs_resource_ignore_file):
+      return []
+    
+    with open(self.resource.hdfs_resource_ignore_file, "rb") as fp:
+      content = fp.read()
+      
+    hdfs_resources_to_ignore = []
+    for hdfs_resource_to_ignore in content.split("\n"):
+      hdfs_resources_to_ignore.append(HdfsResourceProvider.parse_path(hdfs_resource_to_ignore))
+            
+    return hdfs_resources_to_ignore
     
   def action_delayed(self, action_name):
     self.assert_parameter_is_set('type')
-
+    
+    if HdfsResourceProvider.parse_path(self.resource.target) in self.ignored_resources_list:
+      Logger.info("Skipping '{0}' because it is in ignore file {1}.".format(self.resource, self.resource.hdfs_resource_ignore_file))
+      return
+    
     self.get_hdfs_resource_executor().action_delayed(action_name, self)
 
   def action_create_on_execute(self):
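
The hunk above moves parse_path from WebHDFSUtil onto HdfsResourceProvider as a static method and adds an optional ignore file that makes action_delayed skip listed targets. A minimal standalone sketch of that behavior, assuming plain Python outside the resource_management framework and reusing the example ignore-file path documented in the resources change below:

```python
import os
import re

def parse_path(path):
    """Normalize an HDFS URI to a bare path, e.g. hdfs://nn:8020/a/b/c -> /a/b/c."""
    with_authority = re.match("[a-zA-Z]+://[^/]+(/.+)", path)   # hdfs://nn:8020/a/b/c
    without_authority = re.match("[a-zA-Z]+://(/.+)", path)     # hdfs:///a/b/c
    if with_authority:
        path = with_authority.group(1)
    elif without_authority:
        path = without_authority.group(1)
    return re.sub("[/]+", "/", path)  # collapse repeated slashes

def load_ignored_resources(ignore_file):
    """Read the newline-separated ignore file, if present, normalizing each entry."""
    if not ignore_file or not os.path.exists(ignore_file):
        return set()
    with open(ignore_file) as fp:
        return set(parse_path(line) for line in fp.read().split("\n") if line)

# Hypothetical usage: skip any action whose target appears in the ignore file.
ignored = load_ignored_resources("/var/lib/ambari-agent/data/.hdfs_resource_ignore")
target = "hdfs://nn_ha_name/apps/hive/warehouse"
if parse_path(target) in ignored:
    print("Skipping %s: listed in ignore file" % target)
else:
    print("Would apply the HDFS action to %s" % parse_path(target))
```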

+ 12 - 0
ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py

@@ -76,6 +76,18 @@ class HdfsResource(Resource):
   hadoop_bin_dir = ResourceArgument()
   hadoop_conf_dir = ResourceArgument()
   
+  """
+  Path to a file which contains a '\n'-separated list of hdfs resources which should not
+  be managed (i.e. any action on them is simply skipped).
+  
+  This means that if HdfsResource('/test1'..) is executed and /test1 is one of the lines
+  in the given file, the execution will be ignored.
+  
+  Example value:
+  /var/lib/ambari-agent/data/.hdfs_resource_ignore
+  """
+  hdfs_resource_ignore_file = ResourceArgument()
+  
   # WebHDFS needs these
   hdfs_site = ResourceArgument()
   default_fs = ResourceArgument()
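
A usage sketch for the new argument, as it could appear in a stack script. The directory, owner, and configuration values below are illustrative; only hdfs_resource_ignore_file and its example path come from the docstring above, and running this for real requires the Ambari agent's resource_management library and a reachable cluster:

```python
from resource_management.core.environment import Environment
from resource_management.libraries.resources.hdfs_resource import HdfsResource

# Illustrative values; in a real stack script these come from the params module.
hdfs_site = {"dfs.webhdfs.enabled": "true"}

with Environment() as env:
  HdfsResource("/user/ambari-qa",
               type="directory",
               action="create_on_execute",
               owner="ambari-qa",
               hdfs_site=hdfs_site,
               default_fs="hdfs://nn1.example.com:8020",
               # If /user/ambari-qa is listed in this file, HdfsResourceProvider.action_delayed
               # logs a skip message and returns without touching HDFS.
               hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore")
  HdfsResource(None, action="execute")  # flush the delayed actions
```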

+ 2 - 11
ambari-funtest/pom.xml

@@ -59,15 +59,6 @@
           <forkMode>once</forkMode>
         </configuration>
         <executions>
-          <!-- Will display BUILD SUCCESSFUL if build is successful.
-               Does not matter if the tests fail -->
-          <execution>
-            <id>run-integration-tests</id>
-            <phase>test</phase>
-            <goals>
-              <goal>integration-test</goal>
-            </goals>
-          </execution>
           <!-- Will display BUILD FAILURE if build fails or any test fails -->
           <execution>
             <id>run-verify</id>
@@ -125,7 +116,7 @@
         <executable.shell>sh</executable.shell>
         <fileextension.shell>sh</fileextension.shell>
         <fileextension.dot.shell-default></fileextension.dot.shell-default>
-        <assemblydescriptor>src/main/assemblies/funtest.xml</assemblydescriptor>
+        <assemblydescriptor>src/main/assemblies/empty.xml</assemblydescriptor>
         <packagingFormat>jar</packagingFormat>
       </properties>
     </profile>
@@ -143,7 +134,7 @@
         <executable.shell>cmd</executable.shell>
         <fileextension.shell>cmd</fileextension.shell>
         <fileextension.dot.shell-default></fileextension.dot.shell-default>
-        <assemblydescriptor>src/main/assemblies/funtest.xml</assemblydescriptor>
+        <assemblydescriptor>src/main/assemblies/empty.xml</assemblydescriptor>
         <packagingFormat>jar</packagingFormat>
         </properties>
     </profile>

+ 22 - 0
ambari-funtest/src/main/assemblies/empty.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>empty</id>
+  <formats/>
+</assembly>

+ 0 - 79
ambari-funtest/src/main/assemblies/funtest.xml

@@ -1,79 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<assembly>
-  <id>dist</id>
-  <formats>
-    <format>dir</format>
-    <format>tar.gz</format>
-  </formats>
-  <includeBaseDirectory>false</includeBaseDirectory>
-  <files>
-    <file>
-      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
-      <outputDirectory>ambari-funtest-${project.version}/lib/ambari-funtest</outputDirectory>
-    </file>
-  </files>
-  <fileSets>
-    <!-- Distro files, readme, licenses, etc -->
-    <fileSet>
-      <directory>${basedir}/../</directory>
-      <outputDirectory>ambari-funtest-${project.version}/</outputDirectory>
-      <includes>
-        <include>*.txt</include>
-      </includes>
-    </fileSet>
-    <!--
-    <fileSet>
-      <directory>${basedir}/src/main/bin</directory>
-      <outputDirectory>ambari-funtest-${project.version}/bin</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-      <fileMode>0755</fileMode>
-    </fileSet>
-    -->
-    <fileSet>
-      <directory>${basedir}/src/main/resources/</directory>
-      <outputDirectory>/ambari-funtest-${project.version}/keystore</outputDirectory>
-      <includes>
-        <include>db/*</include>
-        <include>ca.config</include>
-        <include>pass.txt</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>${basedir}/../ambari-web/public</directory>
-      <outputDirectory>ambari-funtest-${project.version}/web</outputDirectory>
-      <includes>
-        <include>**</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>src/main/conf</directory>
-      <outputDirectory>/ambari-funtest-${project.version}/etc/ambari-funtest/conf</outputDirectory>
-    </fileSet>
-  </fileSets>
-  <dependencySets>
-    <dependencySet>
-      <outputDirectory>ambari-funtest-${project.version}/lib/ambari-funtest</outputDirectory>
-      <unpack>false</unpack>
-      <scope>compile</scope>
-    </dependencySet>
-  </dependencySets>
-</assembly>

+ 20 - 2
ambari-metrics/ambari-metrics-grafana/README.md

@@ -33,10 +33,12 @@ Use **ambari-metrics** to visualize metrics exposed via AMS in Grafana.
  - [Save Dashboard](#savedash)
  - [Time Ranges](#timerange)
  - [Edit Panel/Graph](#editpanel)
+ - [Templated Dashboards](#templating)
+    - [Multi Host Templated Dashboards](#multi-templating)
 
 
 ----------
-![enter image description here](screenshots/full-dashboard.png)
+![Full Dashboard](screenshots/full-dashboard.png)
 
 ----------
 <a name="installg"></a>
@@ -257,7 +259,23 @@ http://GRAFANA_HOST:3000
 > 10. When you now add a graph, and select your component and metric, the plotted graph will show you metrics for the selected hostname from the dropdown.
 > 11. The legend on the graph will also now update with the selected host.
 
-**Templalted dashboards do support multiple metrics in a single graph.** 
+**Templated dashboards do support multiple metrics in a single graph.** 
 
 
 ![Templating](screenshots/20-templating.png)
+
+---
+
+<a name="multi-templating"></a>
+### Multi Host Templated Dashboards.
+
+**Templated dashboards now have the ability to filter graphs based on a single host or multiple hosts.**
+
+> 1. Once you've created your templated dashboard, you can edit it again by clicking on the "cog" at the top and selecting "Templating".
+> 2. Click on "Edit" for your templating variable.
+> 3. To be able to select multiple hosts, set multi-value selection to "enable" and leave multi-format set to "glob".
+> 4. To have an option for All hosts, select All Value, and set it to "*" and All format to "wildcard".
+> 5. Hit Update and close the templating variables options and you should now be able to select multiple hosts from the dropdown (or "all" hosts at once).
+
+
+![Multi Host Templating](screenshots/21-multi-templating.png)

+ 40 - 9
ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js

@@ -38,6 +38,7 @@ define([
         }
         var allMetrics = [];
         var appIds = [];
+        //We get a list of components and their associated metrics.
         AmbariMetricsDatasource.prototype.initMetricAppidMapping = function () {
           backendSrv.get(this.url + '/ws/v1/timeline/metrics/metadata')
             .then(function (items) {
@@ -73,7 +74,7 @@ define([
           }
 
           options.url = this.url + options.url;
-          options.inspect = {type: 'discovery'};
+          options.inspect = {type: 'ambarimetrics'};
 
           return backendSrv.datasourceRequest(options);
         };
@@ -82,6 +83,7 @@ define([
          * AMS Datasource  Query
          */
         AmbariMetricsDatasource.prototype.query = function (options) {
+
           var emptyData = function (metric) {
             return {
               data: {
@@ -99,6 +101,7 @@ define([
               }
               var series = [];
               var metricData = res.metrics[0].metrics;
+              // Added hostname to legend for templated dashboards.
               var hostLegend = res.metrics[0].hostname ? ' on ' + res.metrics[0].hostname : '';
               var timeSeries = {};
               if (target.hosts === undefined || target.hosts.trim() === "") {
@@ -122,7 +125,6 @@ define([
             };
 
           };
-
           var getHostAppIdData = function(target) {
             var precision = target.shouldAddPrecision ? '&precision=' + target.precision : '';
             var rate = target.shouldComputeRate ? '._rate._' : '._';
@@ -132,14 +134,16 @@ define([
                 getMetricsData(target)
             );
           };
+          //Check if it's a templated dashboard.
+          var templatedHost = (_.isEmpty(templateSrv.variables)) ? "" : templateSrv.variables[0].options.filter(function(host)
+              { return host.selected; }).map(function(hostName) { return hostName.value; });
 
           var getServiceAppIdData = function(target) {
-            var templatedHost = (_.isEmpty(templateSrv.variables)) ? "" : templateSrv.variables[0].options.filter(function(host)
-              { return host.selected; }).map(function(hostName) { return hostName.value; });
+            var tHost = (_.isEmpty(templateSrv.variables)) ? templatedHost : target.templatedHost;
             var precision = target.shouldAddPrecision ? '&precision=' + target.precision : '';
             var rate = target.shouldComputeRate ? '._rate._' : '._';
             return backendSrv.get(self.url + '/ws/v1/timeline/metrics?metricNames=' + target.metric + rate
-              + target.aggregator + '&hostname=' + templatedHost + '&appId=' + target.app + '&startTime=' + from +
+              + target.aggregator + '&hostname=' + tHost + '&appId=' + target.app + '&startTime=' + from +
               '&endTime=' + to + precision).then(
               getMetricsData(target)
             );
@@ -148,15 +152,43 @@ define([
           // Time Ranges
           var from = Math.floor(options.range.from.valueOf() / 1000);
           var to = Math.floor(options.range.to.valueOf() / 1000);
-          var metricsPromises = _.map(options.targets, function(target) {
+
+          var metricsPromises = [];
+          if (!_.isEmpty(templateSrv.variables)) {
+            if (!_.isEmpty(_.find(templatedHost, function (o) { return o === "*"; }))) {
+              var allHost = templateSrv.variables[0].options.filter(function(all) {
+                return all.text !== "All"; }).map(function(hostName) { return hostName.value; });
+              _.forEach(allHost, function(processHost) {
+              metricsPromises.push(_.map(options.targets, function(target) {
+                target.templatedHost = processHost;
+                console.debug('target app=' + target.app + ',' +
+                  'target metric=' + target.metric + ' on host=' + target.templatedHost);
+                return getServiceAppIdData(target);
+              }));
+            });
+            } else {
+              _.forEach(templatedHost, function(processHost) {
+              metricsPromises.push(_.map(options.targets, function(target) {
+                target.templatedHost = processHost;
+                console.debug('target app=' + target.app + ',' +
+                  'target metric=' + target.metric + ' on host=' + target.templatedHost);
+                return getServiceAppIdData(target);
+              }));
+            });
+            }
+
+            metricsPromises = _.flatten(metricsPromises);
+          } else {
+            metricsPromises = _.map(options.targets, function(target) {
               console.debug('target app=' + target.app + ',' +
-                'target metric=' + target.metric + ' on host=' + target.hosts);
+                'target metric=' + target.metric + ' on host=' + target.tempHost);
               if (!!target.hosts) {
                 return getHostAppIdData(target);
               } else {
                 return getServiceAppIdData(target);
               }
             });
+          }
 
           return $q.all(metricsPromises).then(function(metricsDataArray) {
             var data = _.map(metricsDataArray, function(metricsData) {
@@ -288,8 +320,7 @@ define([
           ]);
           return aggregatorsPromise;
         };
-
         return AmbariMetricsDatasource;
       });
     }
-);
+);
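
The datasource change above fans a templated query out into one request per selected host, expanding the "*" selection to every concrete host option, then flattens the resulting promises. A rough Python analogue of that control flow, with a stub standing in for getServiceAppIdData:

```python
def expand_hosts(selected, all_options):
    """Expand the template selection: '*' means every concrete host option."""
    if "*" in selected:
        return [h for h in all_options if h != "All"]
    return list(selected)

def build_requests(selected_hosts, all_options, targets, fetch):
    """Issue one request per (host, target) pair, mirroring the flattened promise list."""
    requests = []
    for host in expand_hosts(selected_hosts, all_options):
        for target in targets:
            requests.append(fetch(dict(target, templatedHost=host)))
    return requests

# Hypothetical stand-in for getServiceAppIdData: just render the query string.
fetch = lambda t: "metricNames=%(metric)s&hostname=%(templatedHost)s&appId=%(app)s" % t
targets = [{"metric": "cpu_user", "app": "HOST"}]
print(build_requests(["*"], ["All", "host1", "host2"], targets, fetch))
```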

BIN
ambari-metrics/ambari-metrics-grafana/screenshots/21-multi-templating.png


+ 3 - 3
ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor

@@ -131,7 +131,7 @@ case "$1" in
     echo "Checking for previously running Metric Monitor..."
     if [ -f ${PIDFILE} ]; then
       PID=`cat ${PIDFILE}`
-      if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+      if [ -z "`ps ax | grep -w ${PID} | grep resource_monitoring`" ]; then
         echo "${PIDFILE} found with no process. Removing ${PID}..."
         rm -f ${PIDFILE}
       else
@@ -152,7 +152,7 @@ case "$1" in
     sleep 2
 
     echo "Verifying ${METRIC_MONITOR} process status..."
-    if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+    if [ -z "`ps ax | grep -w ${PID} | grep resource_monitoring`" ]; then
       if [ -s ${OUTFILE} ]; then
         echo "ERROR: ${METRIC_MONITOR} start failed. For more details, see ${OUTFILE}:"
         echo "===================="
@@ -173,7 +173,7 @@ case "$1" in
     if [ -f ${PIDFILE} ]; then
       PID=`cat ${PIDFILE}`
       echo "Found ${METRIC_MONITOR} PID: $PID"
-      if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+      if [ -z "`ps ax | grep -w ${PID} | grep resource_monitoring`" ]; then
         echo "${METRIC_MONITOR} not running. Stale PID File at: $PIDFILE"
         retcode=2
       else

+ 1 - 1
ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector

@@ -267,7 +267,7 @@ function start()
   sleep 2
 
   echo "Verifying ${METRIC_COLLECTOR} process status..." | tee -a $STARTUPFILE
-  if [ -z "`ps ax -o pid | grep ${PID}`" ]; then
+  if [ -z "`ps ax | grep -w ${PID} | grep ApplicationHistoryServer`" ]; then
     if [ -s ${OUTFILE} ]; then
       echo "ERROR: ${METRIC_COLLECTOR} start failed. For more details, see ${OUTFILE}:" | tee -a $STARTUPFILE
       echo "===================="

+ 9 - 0
ambari-server/conf/unix/log4j.properties

@@ -23,6 +23,7 @@ ambari.log.file=ambari-server.log
 ambari.config-changes.file=ambari-config-changes.log
 ambari.alerts.file=ambari-alerts.log
 ambari.eclipselink.file=ambari-eclipselink.log
+ambari.dbcheck.file=ambari-server-check-database.log
 
 log4j.rootLogger=INFO,file
 
@@ -50,6 +51,14 @@ log4j.appender.alerts.File=${ambari.log.dir}/${ambari.alerts.file}
 log4j.appender.alerts.layout=org.apache.log4j.PatternLayout
 log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n
 
+# Log database check process
+log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck
+log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false
+log4j.appender.dbcheck=org.apache.log4j.FileAppender
+log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file}
+log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout
+log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %m%n
+
 # EclipsLink -> slf4j bridge
 log4j.logger.eclipselink=TRACE,eclipselink
 log4j.additivity.eclipselink=false

+ 9 - 0
ambari-server/conf/windows/log4j.properties

@@ -23,6 +23,7 @@ ambari.log.file=ambari-server.log
 ambari.config-changes.file=ambari-config-changes.log
 ambari.alerts.file=ambari-alerts.log
 ambari.eclipselink.file=ambari-eclipselink.log
+ambari.dbcheck.file=ambari-server-check-database.log
 
 # Define the root logger to the system property "ambari.root.logger".
 log4j.rootLogger=${ambari.root.logger}
@@ -76,6 +77,14 @@ log4j.appender.alerts.File=${ambari.log.dir}\${ambari.alerts.file}
 log4j.appender.alerts.layout=org.apache.log4j.PatternLayout
 log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n
 
+# Log database check process
+log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck
+log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false
+log4j.appender.dbcheck=org.apache.log4j.FileAppender
+log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file}
+log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout
+log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %m%n
+
 # EclipsLink -> slf4j bridge
 log4j.logger.eclipselink=TRACE,eclipselink
 log4j.additivity.eclipselink=false

+ 9 - 0
ambari-server/src/main/conf/log4j.properties

@@ -23,6 +23,7 @@ ambari.log.file=ambari-server.log
 ambari.config-changes.file=ambari-config-changes.log
 ambari.alerts.file=ambari-alerts.log
 ambari.eclipselink.file=ambari-eclipselink.log
+ambari.dbcheck.file=ambari-server-check-database.log
 
 # Define the root logger to the system property "ambari.root.logger".
 log4j.rootLogger=${ambari.root.logger}
@@ -76,6 +77,14 @@ log4j.appender.alerts.File=${ambari.log.dir}/${ambari.alerts.file}
 log4j.appender.alerts.layout=org.apache.log4j.PatternLayout
 log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n
 
+# Log database check process
+log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck
+log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false
+log4j.appender.dbcheck=org.apache.log4j.FileAppender
+log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file}
+log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout
+log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %m%n
+
 # EclipsLink -> slf4j bridge
 log4j.logger.eclipselink=TRACE,eclipselink
 log4j.additivity.eclipselink=false

+ 1 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/GroupResourceDefinition.java

@@ -44,6 +44,7 @@ public class GroupResourceDefinition extends BaseResourceDefinition {
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
     final Set<SubResourceDefinition> subResourceDefinitions = new HashSet<SubResourceDefinition>();
     subResourceDefinitions.add(new SubResourceDefinition(Resource.Type.Member));
+    subResourceDefinitions.add(new SubResourceDefinition(Resource.Type.GroupPrivilege));
     return subResourceDefinitions;
   }
 }

+ 4 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java

@@ -297,6 +297,10 @@ public class ResourceInstanceFactoryImpl implements ResourceInstanceFactory {
         resourceDefinition = new PrivilegeResourceDefinition(Resource.Type.UserPrivilege);
         break;
 
+      case GroupPrivilege:
+        resourceDefinition = new PrivilegeResourceDefinition(Resource.Type.GroupPrivilege);
+        break;
+
       case ViewPermission:
         resourceDefinition = new ViewPermissionResourceDefinition();
         break;

+ 76 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/GroupPrivilegeService.java

@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ *  Service responsible for group privilege resource requests.
+ */
+public class GroupPrivilegeService extends PrivilegeService {
+
+  private final String groupName;
+
+  public GroupPrivilegeService(String groupName) {
+    this.groupName = groupName;
+  }
+
+  // ----- PrivilegeService --------------------------------------------------
+
+  @Override
+  public Response createPrivilege(String body, HttpHeaders headers, UriInfo ui) {
+    return Response.status(HttpServletResponse.SC_NOT_IMPLEMENTED).build();
+  }
+
+  @Override
+  public Response updatePrivilege(String body, HttpHeaders headers, UriInfo ui, String privilegeId) {
+    return Response.status(HttpServletResponse.SC_NOT_IMPLEMENTED).build();
+  }
+
+  @Override
+  public Response updatePrivileges(String body, HttpHeaders headers, UriInfo ui) {
+    return Response.status(HttpServletResponse.SC_NOT_IMPLEMENTED).build();
+  }
+
+  @Override
+  public Response deletePrivilege(HttpHeaders headers, UriInfo ui, String privilegeId) {
+    return Response.status(HttpServletResponse.SC_NOT_IMPLEMENTED).build();
+  }
+
+  @Override
+  public Response deletePrivileges(String body, HttpHeaders headers, UriInfo ui) {
+    return Response.status(HttpServletResponse.SC_NOT_IMPLEMENTED).build();
+  }
+
+  @Override
+  protected ResourceInstance createPrivilegeResource(String privilegeId) {
+    final Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
+    mapIds.put(Resource.Type.Group, groupName);
+    mapIds.put(Resource.Type.GroupPrivilege, privilegeId);
+    return createResource(Resource.Type.GroupPrivilege, mapIds);
+  }
+}

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/GroupService.java

@@ -130,6 +130,17 @@ public class GroupService extends BaseService {
     return new MemberService(groupName);
   }
 
+  /**
+   * Gets the group privilege service
+   */
+  @Path("{groupName}/privileges")
+  public PrivilegeService getPrivilegeService(@Context javax.ws.rs.core.Request request,
+                                              @PathParam ("groupName") String groupName) {
+
+    return new GroupPrivilegeService(groupName);
+  }
+
+
   /**
    * Create a group resource instance.
    *
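
With the sub-resource wired into GroupService, a group's privileges become readable over the REST API (GroupPrivilegeService answers create, update, and delete with 501 Not Implemented). A sketch of querying it with Python's requests library, assuming Ambari's usual /api/v1 base path, a local server on port 8080, default admin credentials, and a hypothetical group named hadoop-admins; the field names follow the PrivilegeInfo property ids used by the new resource provider:

```python
import requests

AMBARI = "http://localhost:8080/api/v1"   # assumed server URL
AUTH = ("admin", "admin")                 # assumed credentials
HEADERS = {"X-Requested-By": "ambari"}

# Read-only listing of the privileges assigned to a group.
resp = requests.get(AMBARI + "/groups/hadoop-admins/privileges", auth=AUTH, headers=HEADERS)
resp.raise_for_status()
for item in resp.json().get("items", []):
    info = item.get("PrivilegeInfo", {})
    print(info.get("permission_name"), info.get("cluster_name") or info.get("instance_name"))
```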

+ 104 - 51
ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java

@@ -37,11 +37,9 @@ import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -60,6 +58,7 @@ public class CheckDatabaseHelper {
   private Connection connection;
   private AmbariMetaInfo ambariMetaInfo;
   private Injector injector;
+  private boolean errorAvailable = false;
 
   @Inject
   public CheckDatabaseHelper(DBAccessor dbAccessor,
@@ -112,6 +111,14 @@ public class CheckDatabaseHelper {
     persistService.stop();
   }
 
+  protected boolean isErrorAvailable() {
+    return errorAvailable;
+  }
+
+  protected void setErrorAvailable(boolean errorAvailable) {
+    this.errorAvailable = errorAvailable;
+  }
+
   /*
   * This method checks if all configurations that we have in clusterconfig table
   * have at least one mapping in clusterconfigmapping table. If we found not mapped config
@@ -169,6 +176,7 @@ public class CheckDatabaseHelper {
       for (String clusterName : configsSelectedMoreThanOnce.keySet()) {
         LOG.error(String.format("You have config(s), in cluster %s, that is(are) selected more than once in clusterconfigmapping: %s",
                 clusterName ,StringUtils.join(configsSelectedMoreThanOnce.get(clusterName), ",")));
+        errorAvailable = true;
       }
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for config selected more than ones procedure: ", e);
@@ -203,6 +211,7 @@ public class CheckDatabaseHelper {
 
       if (!hostsWithoutStatus.isEmpty()) {
         LOG.error("You have host(s) without status: " + StringUtils.join(hostsWithoutStatus, ","));
+        errorAvailable = true;
       }
     } catch (SQLException e) {
       LOG.error("Exception occurred during check for host without state procedure: ", e);
@@ -257,7 +266,8 @@ public class CheckDatabaseHelper {
       }
 
       if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) {
-        LOG.error("Your host component state count not equals host component desired state count!");
+        LOG.error("Your host component states(hostcomponentstate table) count not equals host component desired states(hostcomponentdesiredstate table) count!");
+        errorAvailable = true;
       }
 
     } catch (SQLException e) {
@@ -284,27 +294,32 @@ public class CheckDatabaseHelper {
   * If any issue was discovered, we are showing error message for user.
   * */
   protected void checkServiceConfigs()  {
-    String GET_SERVICES_WITHOUT_CONFIGS_QUERY = "select service_name from clusterservices where service_name not in (select service_name from serviceconfig where group_id is null)";
+    String GET_SERVICES_WITHOUT_CONFIGS_QUERY = "select c.cluster_name, service_name from clusterservices cs " +
+            "join clusters c on cs.cluster_id=c.cluster_id " +
+            "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)";
     String GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY = "select service_name from serviceconfig where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null";
-    String GET_STACK_NAME_VERSION_QUERY = "select s.stack_name, s.stack_version from clusters c join stack s on c.desired_stack_id = s.stack_id";
-    String GET_SERVICES_WITH_CONFIGS_QUERY = "select cs.service_name, type_name, sc.version from clusterservices cs " +
-            "join serviceconfig sc on cs.service_name=sc.service_name " +
+    String GET_STACK_NAME_VERSION_QUERY = "select c.cluster_name, s.stack_name, s.stack_version from clusters c " +
+            "join stack s on c.desired_stack_id = s.stack_id";
+    String GET_SERVICES_WITH_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, type_name, sc.version from clusterservices cs " +
+            "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
-            "join clusterconfig cc on scm.config_id=cc.config_id " +
+            "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " +
+            "join clusters c on cc.cluster_id=c.cluster_id " +
             "where sc.group_id is null " +
-            "group by cs.service_name, type_name, sc.version";
-    String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select cs.service_name,cc.type_name from clusterservices cs " +
-            "join serviceconfig sc on cs.service_name=sc.service_name " +
+            "group by c.cluster_name, cs.service_name, type_name, sc.version";
+    String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select c.cluster_name, cs.service_name,cc.type_name from clusterservices cs " +
+            "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
-            "join clusterconfig cc on scm.config_id=cc.config_id " +
-            "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag " +
-            "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name) " +
-            "group by cs.service_name,cc.type_name " +
+            "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " +
+            "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " +
+            "join clusters c on ccm.cluster_id=c.cluster_id " +
+            "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " +
+            "group by c.cluster_name,cs.service_name,cc.type_name " +
             "having sum(ccm.selected) < 1";
-    String stackName = null, stackVersion = null;
-    Set<String> servicesWithoutConfigs = new HashSet<>();
+    Multimap<String, String> servicesWithoutConfigs = HashMultimap.create();
+    Map<String, Map<String, String>>  clusterStackInfo = new HashMap<>();
     Set<String> servicesWithoutMappedConfigs = new HashSet<>();
-    Map<String, List<String>> notSelectedServiceConfigs = new HashMap<>();
+    Map<String, Multimap<String, String>> notSelectedServiceConfigs = new HashMap<>();
     ResultSet rs = null;
 
     try {
@@ -313,12 +328,13 @@ public class CheckDatabaseHelper {
       rs = statement.executeQuery(GET_SERVICES_WITHOUT_CONFIGS_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          servicesWithoutConfigs.add(rs.getString("service_name"));
+          servicesWithoutConfigs.put(rs.getString("cluster_name"), rs.getString("service_name"));
         }
       }
 
-      if (!servicesWithoutConfigs.isEmpty()) {
-        LOG.error("You have services without configs at all: " + StringUtils.join(servicesWithoutConfigs, ","));
+      for (String clusterName : servicesWithoutConfigs.keySet()) {
+        LOG.error(String.format("Service(s): %s, from cluster %s has no config(s) in serviceconfig table!", StringUtils.join(servicesWithoutConfigs.get(clusterName), ","), clusterName));
+        errorAvailable = true;
       }
 
       rs = statement.executeQuery(GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY);
@@ -329,44 +345,63 @@ public class CheckDatabaseHelper {
       }
 
       if (!servicesWithoutMappedConfigs.isEmpty()) {
-        LOG.error("You have services without mapped configs: " + StringUtils.join(servicesWithoutMappedConfigs, ","));
+        LOG.error("You have service(s) without mapped configs in serviceconfigmapping: " + StringUtils.join(servicesWithoutMappedConfigs, ","));
+        errorAvailable = true;
       }
 
       rs = statement.executeQuery(GET_STACK_NAME_VERSION_QUERY);
       if (rs != null) {
         while (rs.next()) {
-          stackName = rs.getString("stack_name");
-          stackVersion = rs.getString("stack_version");
+          Map<String, String> stackInfoMap = new HashMap<>();
+          stackInfoMap.put(rs.getString("stack_name"), rs.getString("stack_version"));
+          clusterStackInfo.put(rs.getString("cluster_name"), stackInfoMap);
         }
       }
 
-      if (stackName != null && stackVersion != null) {
-        Set<String> serviceNames = new HashSet<>();
-        Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = new HashMap<>();
-        Multimap<String, String> stackServiceConfigs = HashMultimap.create();
 
-        rs = statement.executeQuery(GET_SERVICES_WITH_CONFIGS_QUERY);
-        if (rs != null) {
-          String serviceName = null, configType = null;
-          Integer serviceVersion = null;
-          while (rs.next()) {
-            serviceName = rs.getString("service_name");
-            configType = rs.getString("type_name");
-            serviceVersion = rs.getInt("version");
+      Set<String> serviceNames = new HashSet<>();
+      Map<String, Map<Integer, Multimap<String, String>>> dbClusterServiceVersionConfigs = new HashMap<>();
+      Multimap<String, String> stackServiceConfigs = HashMultimap.create();
+
+      rs = statement.executeQuery(GET_SERVICES_WITH_CONFIGS_QUERY);
+      if (rs != null) {
+        String serviceName = null, configType = null, clusterName = null;
+        Integer serviceVersion = null;
+        while (rs.next()) {
+          clusterName = rs.getString("cluster_name");
+          serviceName = rs.getString("service_name");
+          configType = rs.getString("type_name");
+          serviceVersion = rs.getInt("version");
+
+          serviceNames.add(serviceName);
 
-            serviceNames.add(serviceName);
+          if (dbClusterServiceVersionConfigs.get(clusterName) != null) {
+            Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName);
 
-            if (dbServiceVersionConfigs.get(serviceVersion) == null) {
+            if (dbServiceVersionConfigs.get(serviceVersion) != null) {
+              dbServiceVersionConfigs.get(serviceVersion).put(serviceName, configType);
+            } else {
               Multimap<String, String> dbServiceConfigs = HashMultimap.create();
               dbServiceConfigs.put(serviceName, configType);
               dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs);
-            } else {
-              dbServiceVersionConfigs.get(serviceVersion).put(serviceName, configType);
             }
+          } else {
+
+            Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = new HashMap<>();
+            Multimap<String, String> dbServiceConfigs = HashMultimap.create();
+            dbServiceConfigs.put(serviceName, configType);
+            dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs);
+            dbClusterServiceVersionConfigs.put(clusterName, dbServiceVersionConfigs);
+
           }
         }
+      }
 
-
+      for (Map.Entry<String, Map<String, String>> clusterStackInfoEntry : clusterStackInfo.entrySet()) {
+        String clusterName = clusterStackInfoEntry.getKey();
+        Map<String, String> stackInfo = clusterStackInfoEntry.getValue();
+        String stackName = stackInfo.keySet().iterator().next();
+        String stackVersion = stackInfo.get(stackName);
         Map<String, ServiceInfo> serviceInfoMap = ambariMetaInfo.getServices(stackName, stackVersion);
         for (String serviceName : serviceNames) {
           ServiceInfo serviceInfo = serviceInfoMap.get(serviceName);
@@ -376,6 +411,7 @@ public class CheckDatabaseHelper {
           }
         }
 
+        Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName);
         for (Integer serviceVersion : dbServiceVersionConfigs.keySet()) {
           Multimap<String, String> dbServiceConfigs = dbServiceVersionConfigs.get(serviceVersion);
           for (String serviceName : dbServiceConfigs.keySet()) {
@@ -384,33 +420,45 @@ public class CheckDatabaseHelper {
             if (serviceConfigsFromDB != null && serviceConfigsFromStack != null) {
               serviceConfigsFromStack.removeAll(serviceConfigsFromDB);
               if (!serviceConfigsFromStack.isEmpty()) {
-                LOG.error(String.format("Required config(s): %s is(are) not available for service %s with service config version %s",
-                        StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion)));
+                LOG.error(String.format("Required config(s): %s is(are) not available for service %s with service config version %s for cluster %s",
+                        StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName));
+                errorAvailable = true;
               }
             }
           }
         }
       }
 
+
       rs = statement.executeQuery(GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY);
       if (rs != null) {
-        String serviceName = null, configType = null;
+        String serviceName = null, configType = null, clusterName = null;
         while (rs.next()) {
+          clusterName = rs.getString("cluster_name");
           serviceName = rs.getString("service_name");
           configType = rs.getString("type_name");
 
-          if (notSelectedServiceConfigs.get(serviceName) != null) {
-            notSelectedServiceConfigs.get(serviceName).add(configType);
+
+          if (notSelectedServiceConfigs.get(clusterName) != null) {
+            Multimap<String, String> serviceConfigs = notSelectedServiceConfigs.get(clusterName);
+            serviceConfigs.put(serviceName, configType);
           } else {
-            List<String> configTypes = new ArrayList<>();
-            configTypes.add(configType);
-            notSelectedServiceConfigs.put(serviceName, configTypes);
+
+            Multimap<String, String> serviceConfigs = HashMultimap.create();
+            serviceConfigs.put(serviceName, configType);
+            notSelectedServiceConfigs.put(clusterName, serviceConfigs);
+
           }
+
         }
       }
 
-      for (String serviceName : notSelectedServiceConfigs.keySet()) {
-        LOG.error(String.format("You have non selected configs: %s for service %s.", StringUtils.join(notSelectedServiceConfigs.get(serviceName), ","), serviceName));
+      for (String clusterName : notSelectedServiceConfigs.keySet()) {
+        Multimap<String, String> serviceConfig = notSelectedServiceConfigs.get(clusterName);
+        for (String serviceName : serviceConfig.keySet()) {
+          LOG.error(String.format("You have non selected configs: %s for service %s from cluster %s!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName));
+          errorAvailable = true;
+        }
       }
     } catch (SQLException e) {
       LOG.error("Exception occurred during complex service check procedure: ", e);
@@ -467,6 +515,11 @@ public class CheckDatabaseHelper {
     } finally {
       if (checkDatabaseHelper != null) {
         checkDatabaseHelper.closeConnection();
+        if (checkDatabaseHelper.isErrorAvailable()) {
+          System.out.print("Some error(s) was(were) found. Please check ambari-server-check-database.log for problem(s).");
+        } else {
+          System.out.print("No errors were found.");
+        }
       }
     }
   }

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java

@@ -84,6 +84,8 @@ public class DefaultProviderModule extends AbstractProviderModule {
         return new LdapSyncEventResourceProvider(managementController);
       case UserPrivilege:
         return new UserPrivilegeResourceProvider();
+      case GroupPrivilege:
+        return new GroupPrivilegeResourceProvider();
       case Alert:
         return new AlertResourceProvider(managementController);
       case AlertDefinition:

+ 237 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/GroupPrivilegeResourceProvider.java

@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import com.google.inject.Inject;
+import org.apache.ambari.server.StaticallyInject;
+import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
+import org.apache.ambari.server.controller.spi.NoSuchResourceException;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.GroupDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.GroupEntity;
+import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
+import org.apache.ambari.server.orm.entities.PrivilegeEntity;
+import org.apache.ambari.server.orm.entities.ViewEntity;
+import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
+import org.apache.ambari.server.security.authorization.AuthorizationException;
+import org.apache.ambari.server.security.authorization.AuthorizationHelper;
+import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.security.authorization.RoleAuthorization;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Resource provider for group privilege resources.
+ */
+@StaticallyInject
+public class GroupPrivilegeResourceProvider extends ReadOnlyResourceProvider {
+
+  protected static final String PRIVILEGE_PRIVILEGE_ID_PROPERTY_ID = PrivilegeResourceProvider.PRIVILEGE_ID_PROPERTY_ID;
+  protected static final String PRIVILEGE_PERMISSION_NAME_PROPERTY_ID = PrivilegeResourceProvider.PERMISSION_NAME_PROPERTY_ID;
+  protected static final String PRIVILEGE_PERMISSION_LABEL_PROPERTY_ID = PrivilegeResourceProvider.PERMISSION_LABEL_PROPERTY_ID;
+  protected static final String PRIVILEGE_PRINCIPAL_NAME_PROPERTY_ID = PrivilegeResourceProvider.PRINCIPAL_NAME_PROPERTY_ID;
+  protected static final String PRIVILEGE_PRINCIPAL_TYPE_PROPERTY_ID = PrivilegeResourceProvider.PRINCIPAL_TYPE_PROPERTY_ID;
+  protected static final String PRIVILEGE_VIEW_NAME_PROPERTY_ID = ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_NAME_PROPERTY_ID;
+  protected static final String PRIVILEGE_VIEW_VERSION_PROPERTY_ID = ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_VERSION_PROPERTY_ID;
+  protected static final String PRIVILEGE_INSTANCE_NAME_PROPERTY_ID = ViewPrivilegeResourceProvider.PRIVILEGE_INSTANCE_NAME_PROPERTY_ID;
+  protected static final String PRIVILEGE_CLUSTER_NAME_PROPERTY_ID = ClusterPrivilegeResourceProvider.PRIVILEGE_CLUSTER_NAME_PROPERTY_ID;
+  protected static final String PRIVILEGE_TYPE_PROPERTY_ID = AmbariPrivilegeResourceProvider.PRIVILEGE_TYPE_PROPERTY_ID;
+  protected static final String PRIVILEGE_GROUP_NAME_PROPERTY_ID = "PrivilegeInfo/group_name";
+
+  /**
+   * Data access object used to obtain cluster entities.
+   */
+  @Inject
+  protected static ClusterDAO clusterDAO;
+
+  /**
+   * Data access object used to obtain group entities.
+   */
+  @Inject
+  protected static GroupDAO groupDAO;
+
+  /**
+   * Data access object used to obtain view instance entities.
+   */
+  @Inject
+  protected static ViewInstanceDAO viewInstanceDAO;
+
+  /**
+   * The property ids for a privilege resource.
+   */
+  private static Set<String> propertyIds = new HashSet<String>();
+
+  static {
+    propertyIds.add(PRIVILEGE_PRIVILEGE_ID_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_PERMISSION_NAME_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_PERMISSION_LABEL_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_PRINCIPAL_NAME_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_PRINCIPAL_TYPE_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_VIEW_NAME_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_VIEW_VERSION_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_INSTANCE_NAME_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_CLUSTER_NAME_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_TYPE_PROPERTY_ID);
+    propertyIds.add(PRIVILEGE_GROUP_NAME_PROPERTY_ID);
+  }
+
+  /**
+   * Static initialization.
+   *
+   * @param clusterDAO      the cluster data access object
+   * @param groupDAO        the group data access object
+   * @param viewInstanceDAO the view instance data access object
+   */
+  public static void init(ClusterDAO clusterDAO, GroupDAO groupDAO,
+                          ViewInstanceDAO viewInstanceDAO) {
+    GroupPrivilegeResourceProvider.clusterDAO = clusterDAO;
+    GroupPrivilegeResourceProvider.groupDAO = groupDAO;
+    GroupPrivilegeResourceProvider.viewInstanceDAO = viewInstanceDAO;
+  }
+
+  @SuppressWarnings("serial")
+  private static Set<String> pkPropertyIds = new HashSet<String>() {
+    {
+      add(PRIVILEGE_PRIVILEGE_ID_PROPERTY_ID);
+    }
+  };
+
+  /**
+   * The key property ids for a privilege resource.
+   */
+  private static Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
+
+  static {
+    keyPropertyIds.put(Resource.Type.Group, PRIVILEGE_GROUP_NAME_PROPERTY_ID);
+    keyPropertyIds.put(Resource.Type.GroupPrivilege, PRIVILEGE_PRIVILEGE_ID_PROPERTY_ID);
+  }
+
+
+  /**
+   * Constructor.
+   */
+  public GroupPrivilegeResourceProvider() {
+    super(propertyIds, keyPropertyIds, null);
+
+    EnumSet<RoleAuthorization> requiredAuthorizations = EnumSet.of(RoleAuthorization.AMBARI_ASSIGN_ROLES);
+    setRequiredCreateAuthorizations(requiredAuthorizations);
+    setRequiredDeleteAuthorizations(requiredAuthorizations);
+    setRequiredGetAuthorizations(requiredAuthorizations);
+    setRequiredUpdateAuthorizations(requiredAuthorizations);
+  }
+
+  // ----- PrivilegeResourceProvider -----------------------------------------
+
+  @Override
+  protected Set<String> getPKPropertyIds() {
+    return pkPropertyIds;
+  }
+
+  @Override
+  public Set<Resource> getResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException,
+      NoSuchResourceException, NoSuchParentResourceException {
+    final Set<Resource> resources = new HashSet<Resource>();
+    final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
+
+    // Ensure that the authenticated user has authorization to get this information
+    if (!AuthorizationHelper.isAuthorized(ResourceType.AMBARI, null, RoleAuthorization.AMBARI_MANAGE_GROUPS)) {
+      throw new AuthorizationException();
+    }
+
+    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
+      final String groupName = (String) propertyMap.get(PRIVILEGE_GROUP_NAME_PROPERTY_ID);
+
+      if (groupName != null) {
+        GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
+
+        if (groupEntity == null) {
+          throw new SystemException("Group " + groupName + " was not found");
+        }
+
+        final Set<PrivilegeEntity> privileges = groupEntity.getPrincipal().getPrivileges();
+        for (PrivilegeEntity privilegeEntity : privileges) {
+          resources.add(toResource(privilegeEntity, groupName, requestedIds));
+        }
+      }
+    }
+
+    return resources;
+  }
+
+  /**
+   * Translate the found data into a Resource
+   *
+   * @param privilegeEntity the privilege data
+   * @param groupName        the group name
+   * @param requestedIds    the relevant request ids
+   * @return a resource
+   */
+  protected Resource toResource(PrivilegeEntity privilegeEntity, Object groupName, Set<String> requestedIds) {
+    final ResourceImpl resource = new ResourceImpl(Resource.Type.GroupPrivilege);
+
+    setResourceProperty(resource, PRIVILEGE_GROUP_NAME_PROPERTY_ID, groupName, requestedIds);
+    setResourceProperty(resource, PRIVILEGE_PRIVILEGE_ID_PROPERTY_ID, privilegeEntity.getId(), requestedIds);
+    setResourceProperty(resource, PRIVILEGE_PERMISSION_NAME_PROPERTY_ID, privilegeEntity.getPermission().getPermissionName(), requestedIds);
+    setResourceProperty(resource, PRIVILEGE_PERMISSION_LABEL_PROPERTY_ID, privilegeEntity.getPermission().getPermissionLabel(), requestedIds);
+    setResourceProperty(resource, PRIVILEGE_PRINCIPAL_TYPE_PROPERTY_ID, privilegeEntity.getPrincipal().getPrincipalType().getName(), requestedIds);
+
+    final String principalTypeName = privilegeEntity.getPrincipal().getPrincipalType().getName();
+    if (principalTypeName.equals(PrincipalTypeEntity.GROUP_PRINCIPAL_TYPE_NAME)) {
+      final GroupEntity groupEntity = groupDAO.findGroupByPrincipal(privilegeEntity.getPrincipal());
+      setResourceProperty(resource, PRIVILEGE_PRINCIPAL_NAME_PROPERTY_ID, groupEntity.getGroupName(), requestedIds);
+    }
+
+    String typeName = privilegeEntity.getResource().getResourceType().getName();
+    ResourceType resourceType = ResourceType.translate(typeName);
+    if (resourceType != null) {
+      switch (resourceType) {
+        case AMBARI:
+          // there is nothing special to add for this case
+          break;
+        case CLUSTER:
+          final ClusterEntity clusterEntity = clusterDAO.findByResourceId(privilegeEntity.getResource().getId());
+          setResourceProperty(resource, PRIVILEGE_CLUSTER_NAME_PROPERTY_ID, clusterEntity.getClusterName(), requestedIds);
+          break;
+        case VIEW:
+          final ViewInstanceEntity viewInstanceEntity = viewInstanceDAO.findByResourceId(privilegeEntity.getResource().getId());
+          final ViewEntity viewEntity = viewInstanceEntity.getViewEntity();
+
+          setResourceProperty(resource, PRIVILEGE_VIEW_NAME_PROPERTY_ID, viewEntity.getCommonName(), requestedIds);
+          setResourceProperty(resource, PRIVILEGE_VIEW_VERSION_PROPERTY_ID, viewEntity.getVersion(), requestedIds);
+          setResourceProperty(resource, PRIVILEGE_INSTANCE_NAME_PROPERTY_ID, viewInstanceEntity.getName(), requestedIds);
+          break;
+      }
+
+      setResourceProperty(resource, PRIVILEGE_TYPE_PROPERTY_ID, resourceType.name(), requestedIds);
+    }
+
+    return resource;
+  }
+}
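
For reference, this provider backs the new group sub-resource registered by GroupPrivilegeService, so group privileges should become readable over REST. Below is a minimal sketch of querying them, assuming the conventional /api/v1/groups/&lt;name&gt;/privileges path and basic-auth admin credentials; the server URL, credentials, and group name are illustrative and not taken from this change, and the response keys other than PrivilegeInfo/group_name (defined in this class) are assumed.

# Sketch: list the privileges attached to an Ambari group (assumptions noted above).
import requests

AMBARI_URL = "http://ambari.example.com:8080"   # hypothetical server
GROUP_NAME = "hadoop-admins"                    # hypothetical group

resp = requests.get(
    "{0}/api/v1/groups/{1}/privileges".format(AMBARI_URL, GROUP_NAME),
    auth=("admin", "admin"))
resp.raise_for_status()

for item in resp.json().get("items", []):
    info = item.get("PrivilegeInfo", {})
    print("{0} {1} {2}".format(info.get("group_name"),
                               info.get("permission_name"),
                               info.get("type")))

Whether the call succeeds is governed by the AMBARI.MANAGE_GROUPS authorization check in getResources above.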

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java

@@ -130,6 +130,7 @@ public interface Resource {
     StackLevelConfiguration,
     LdapSyncEvent,
     UserPrivilege,
+    GroupPrivilege,
     RepositoryVersion,
     CompatibleRepositoryVersion,
     ClusterStackVersion,
@@ -242,6 +243,7 @@ public interface Resource {
     public static final Type StackLevelConfiguration = InternalType.StackLevelConfiguration.getType();
     public static final Type LdapSyncEvent = InternalType.LdapSyncEvent.getType();
     public static final Type UserPrivilege = InternalType.UserPrivilege.getType();
+    public static final Type GroupPrivilege = InternalType.GroupPrivilege.getType();
     public static final Type RepositoryVersion = InternalType.RepositoryVersion.getType();
     public static final Type CompatibleRepositoryVersion = InternalType.CompatibleRepositoryVersion.getType();
     public static final Type ClusterStackVersion = InternalType.ClusterStackVersion.getType();

+ 66 - 10
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java

@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.upgrade;
 
+import com.google.common.collect.Lists;
 import com.google.gson.JsonArray;
 import com.google.gson.JsonElement;
 import com.google.gson.JsonObject;
@@ -30,6 +31,8 @@ import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.PermissionDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.state.Cluster;
@@ -37,7 +40,9 @@ import org.apache.ambari.server.state.Clusters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -58,12 +63,19 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   @Inject
   DaoUtils daoUtils;
 
+  @Inject
+  PermissionDAO permissionDAO;
+
+  @Inject
+  ResourceTypeDAO resourceTypeDAO;
+
   /**
    * Logger.
    */
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog240.class);
 
-
+  private static final String ID = "id";
+  private static final String SETTING_TABLE = "setting";
 
 
   // ----- Constructors ------------------------------------------------------
@@ -76,7 +88,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   @Inject
   public UpgradeCatalog240(Injector injector) {
     super(injector);
-    this.injector = injector;
+    injector.injectMembers(this);
   }
 
   // ----- UpgradeCatalog ----------------------------------------------------
@@ -103,6 +115,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
     updateAdminPermissionTable();
+    createSettingTable();
   }
 
   @Override
@@ -115,7 +128,39 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     addNewConfigurationsFromXml();
     updateAlerts();
     setRoleSortOrder();
+    addSettingPermission();
+  }
+
+  private void createSettingTable() throws SQLException {
+    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
 
+    //  Add setting table
+    LOG.info("Creating " + SETTING_TABLE + " table");
+
+    columns.add(new DBAccessor.DBColumnInfo(ID, Long.class, null, null, false));
+    columns.add(new DBAccessor.DBColumnInfo("name", String.class, 255, null, false));
+    columns.add(new DBAccessor.DBColumnInfo("setting_type", String.class, 255, null, false));
+    columns.add(new DBAccessor.DBColumnInfo("content", String.class, 3000, null, false));
+    columns.add(new DBAccessor.DBColumnInfo("updated_by", String.class, 255, "_db", false));
+    columns.add(new DBAccessor.DBColumnInfo("update_timestamp", Long.class, null, null, false));
+    dbAccessor.createTable(SETTING_TABLE, columns, ID);
+    addSequence("setting_id_seq", 0L, false);
+  }
+
+  protected void addSettingPermission() throws SQLException {
+    String administratorPermissionId =
+            permissionDAO.findPermissionByNameAndType("AMBARI.ADMINISTRATOR", resourceTypeDAO.findByName("AMBARI")).getId().toString();
+    String selectRoleSql = "select * from roleauthorization where authorization_id = 'AMBARI.MANAGE_SETTINGS'";
+    if (executeAndCheckEmptyResult(selectRoleSql)) {
+      dbAccessor.insertRow("roleauthorization", new String[]{"authorization_id", "authorization_name"},
+              new String[]{"'AMBARI.MANAGE_SETTINGS'", "'Manage settings'"}, false);
+    }
+
+    String selectPermissionSql = "select * from permission_roleauthorization where authorization_id = 'AMBARI.MANAGE_SETTINGS'";
+    if (executeAndCheckEmptyResult(selectPermissionSql)) {
+      dbAccessor.insertRow("permission_roleauthorization", new String[]{"permission_id", "authorization_id"},
+              new String[]{administratorPermissionId, "'AMBARI.MANAGE_SETTINGS'"}, false);
+    }
   }
 
   protected void updateAlerts() {
@@ -147,21 +192,21 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
       Map<AlertDefinitionEntity, List<String>> alertDefinitionParams = new HashMap<>();
       checkedPutToMap(alertDefinitionParams, namenodeLastCheckpointAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold")));
+              Lists.newArrayList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold"));
       checkedPutToMap(alertDefinitionParams, namenodeHAHealthAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("connection.timeout")));
+              Lists.newArrayList("connection.timeout"));
       checkedPutToMap(alertDefinitionParams, nodemanagerHealthAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("connection.timeout")));
+              Lists.newArrayList("connection.timeout"));
       checkedPutToMap(alertDefinitionParams, nodemanagerHealthSummaryAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("connection.timeout")));
+              Lists.newArrayList("connection.timeout"));
       checkedPutToMap(alertDefinitionParams, hiveMetastoreProcessAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab")));
+              Lists.newArrayList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab"));
       checkedPutToMap(alertDefinitionParams, hiveServerProcessAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab")));
+              Lists.newArrayList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab"));
       checkedPutToMap(alertDefinitionParams, hiveWebhcatServerStatusAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("default.smoke.user", "connection.timeout")));
+              Lists.newArrayList("default.smoke.user", "connection.timeout"));
       checkedPutToMap(alertDefinitionParams, flumeAgentStatusAlertDefinitionEntity,
-              new ArrayList<String>(Arrays.asList("run.directory")));
+              Lists.newArrayList("run.directory"));
 
       for(Map.Entry<AlertDefinitionEntity, List<String>> entry : alertDefinitionParams.entrySet()){
         AlertDefinitionEntity alertDefinition = entry.getKey();
@@ -186,6 +231,17 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     }
   }
 
+  private boolean executeAndCheckEmptyResult(String sql) throws SQLException {
+    try(Statement statement = dbAccessor.getConnection().createStatement();
+        ResultSet resultSet = statement.executeQuery(sql)) {
+      if (resultSet != null && resultSet.next()) {
+        return false;
+      } else {
+        return true;
+      }
+    }
+  }
+
   protected String addParam(String source, List<String> params) {
     JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
     JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");

+ 3 - 4
ambari-server/src/main/python/ambari_server/checkDatabase.py

@@ -39,8 +39,7 @@ from ambari_server.serverUtils import is_server_runing
 from ambari_server.userInput import get_YN_input
 
 CHECK_DATABASE_HELPER_CMD = "{0} -cp {1} " + \
-                         "org.apache.ambari.server.checks.CheckDatabaseHelper" + \
-                         " > " + configDefaults.SERVER_LOG_FILE + " 2>&1"
+                         "org.apache.ambari.server.checks.CheckDatabaseHelper"
 
 def check_database(options):
 
@@ -71,10 +70,10 @@ def check_database(options):
   print_info_msg("Return code from check database command, retcode = " + str(retcode))
 
   if retcode > 0:
-    print_error_msg("Database check failed to complete. Please check ambari-server.log for problem.")
+    print_error_msg("Database check failed to complete. Please check ambari-server.log and ambari-server-check-database.log for problem.")
     raise FatalException(1, 'Database check failed.')
   else:
-    print_info_msg('Check database completed successfully. Please check ambari-server.log for results.')
+    print str(stdout)
 
 
 

+ 1 - 0
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py

@@ -186,6 +186,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py

@@ -265,6 +265,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py

@@ -119,6 +119,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 2 - 2
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/gpcheck-env.xml → ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-check-env.xml

@@ -18,11 +18,11 @@
 -->
 
 <configuration supports_adding_forbidden="true">
-  <!-- gpcheck.cnf -->
+  <!-- hawq_check.cnf -->
     <property>
       <name>content</name>
       <display-name>Content</display-name>
-      <description>Contents of the configuration file /usr/local/hawq/etc/gpcheck.cnf. This file is used by 'hawq check' command, which can be run manually by gpadmin user on the HAWQ master host. This command validates the system parameters and HDFS parameters mentioned in this file to ensure optimal HAWQ operation.</description>
+      <description>Contents of the configuration file /usr/local/hawq/etc/hawq_check.cnf. This file is used by the 'hawq check' command, which can be run manually by the gpadmin user on the HAWQ master host. This command validates the system parameters and HDFS parameters mentioned in this file to ensure optimal HAWQ operation.</description>
         <value>
 [global]
 configfile_version = 4

+ 56 - 69
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/kerberos.json

@@ -1,73 +1,60 @@
 {
-    "services": [
+  "services": [
+    {
+      "name": "HAWQ",
+      "identities": [
         {
-            "name": "HAWQ",
-            "identities": [
-                {
-                    "name": "/hdfs"
-                }
-            ], 
-            "configurations": [
-                {
-                    "hawq-site": {
-                        "enable_secure_filesystem": "ON",
-                        "krb_server_keyfile": "${keytab_dir}/hawq.service.keytab"
-                    }
-                },
-                {
-                    "hdfs-client": {
-                        "hadoop.security.authentication": "kerberos"
-                    }
-                }
-            ],
-            "components": [
-                {
-                    "identities": [
-                        {
-                            "keytab": {
-                                "file": "${keytab_dir}/hawq.service.keytab", 
-                                "group": {
-                                    "access": "", 
-                                    "name": "${cluster-env/user_group}"
-                                }, 
-                                "owner": {
-                                    "access": "r", 
-                                    "name": "gpadmin"
-                                }
-                            }, 
-                            "name": "hawq_master_hawq", 
-                            "principal": {
-                                "type": "service", 
-                                "value": "postgres@${realm}"
-                            }
-                        }
-                    ], 
-                    "name": "HAWQMASTER"
-                }, 
-                {
-                    "identities": [
-                        {
-                            "keytab": {
-                                "file": "${keytab_dir}/hawq.service.keytab", 
-                                "group": {
-                                    "access": "", 
-                                    "name": "${cluster-env/user_group}"
-                                }, 
-                                "owner": {
-                                    "access": "r", 
-                                    "name": "gpadmin"
-                                }
-                            }, 
-                            "name": "hawq_standby_hawq", 
-                            "principal": {
-                                "type": "service", 
-                                "value": "postgres@${realm}"
-                            }
-                        }
-                    ], 
-                    "name": "HAWQSTANDBY"
-                }
-            ] 
+          "name": "/HDFS/NAMENODE/hdfs"
+        },
+        {
+          "name": "hawq_identity",
+          "principal": {
+            "type": "user",
+            "value": "postgres@${realm}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hawq.service.keytab",
+            "owner": {
+              "access": "r",
+              "name": "gpadmin"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}"
+            }
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "hawq-site": {
+            "enable_secure_filesystem": "ON",
+            "krb_server_keyfile": "${keytab_dir}/hawq.service.keytab"
+          }
+        },
+        {
+          "hdfs-client": {
+            "hadoop.security.authentication": "kerberos"
+          }
+        }
+      ],
+      "components" : [
+        {
+          "name": "HAWQMASTER",
+          "identities": [
+            {
+              "name": "/HAWQ/hawq_identity"
+            }
+          ]
+        },
+        {
+          "name": "HAWQSTANDBY",
+          "identities": [
+            {
+              "name": "/HAWQ/hawq_identity"
+            }
+          ]
         }
-    ]
+      ]
+    }
+  ]
 }

+ 12 - 4
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml

@@ -39,7 +39,7 @@
           </commandScript>
           <customCommands>
             <customCommand>
-              <name>IMMEDIATE_STOP_CLUSTER</name>
+              <name>IMMEDIATE_STOP_HAWQ_SERVICE</name>
               <commandScript>
                 <script>scripts/hawqmaster.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -78,7 +78,15 @@
           </commandScript>
           <customCommands>
             <customCommand>
-              <name>ACTIVATE_STANDBY</name>
+              <name>ACTIVATE_HAWQ_STANDBY</name>
+              <commandScript>
+                <script>scripts/hawqstandby.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>1200</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>RESYNC_HAWQ_STANDBY</name>
               <commandScript>
                 <script>scripts/hawqstandby.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -110,7 +118,7 @@
           </commandScript>
           <customCommands>
             <customCommand>
-              <name>IMMEDIATE_STOP</name>
+              <name>IMMEDIATE_STOP_HAWQ_SEGMENT</name>
               <commandScript>
                 <script>scripts/hawqsegment.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -144,7 +152,7 @@
       <configuration-dependencies>
         <config-type>hawq-env</config-type>
         <config-type>hawq-site</config-type>
-        <config-type>gpcheck-env</config-type>
+        <config-type>hawq-check-env</config-type>
         <config-type>hdfs-client</config-type>
         <config-type>yarn-client</config-type>
         <config-type>hawq-limits-env</config-type>

+ 13 - 50
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py

@@ -20,9 +20,7 @@ import os
 import time
 import crypt
 import filecmp
-from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.core.resources.system import Execute, Directory, File
-from resource_management.libraries.script.config_dictionary import ConfigDictionary
 from resource_management.core.logger import Logger
 from resource_management.core.system import System
 from resource_management.core.exceptions import Fail
@@ -67,58 +65,23 @@ def setup_common_configurations():
   """
   Sets up the config files common to master, standby and segment nodes.
   """
-  __update_hdfs_client()
-  __update_yarn_client()
-  __update_hawq_site()
-  __set_osparams()
-
-def __update_hdfs_client():
-  """
-  Writes hdfs-client.xml on the local filesystem on hawq nodes.
-  If hdfs ha is enabled, appends related parameters to hdfs-client.xml
-  """
-  import params
-
-  hdfs_client_dict = params.hdfs_client.copy()
-  
-  XmlConfig("hdfs-client.xml",
-            conf_dir=hawq_constants.hawq_config_dir,
-            configurations=ConfigDictionary(hdfs_client_dict),
-            configuration_attributes=params.config['configuration_attributes']['hdfs-client'],
-            owner=hawq_constants.hawq_user,
-            group=hawq_constants.hawq_group,
-            mode=0644)
-
-
-def __update_yarn_client():
-  """
-  Writes yarn-client.xml on the local filesystem on hawq nodes.
-  If yarn ha is enabled, appends related parameters to yarn-client.xml
-  """
   import params
 
-  XmlConfig("yarn-client.xml",
-            conf_dir=hawq_constants.hawq_config_dir,
-            configurations=params.yarn_client,
-            configuration_attributes=params.config['configuration_attributes']['yarn-client'],
-            owner=hawq_constants.hawq_user,
-            group=hawq_constants.hawq_group,
-            mode=0644)
+  # Write hdfs-client.xml on the local filesystem. If hdfs HA is enabled, append related parameters
+  params.XmlConfig(filename="hdfs-client.xml",
+                   configurations=params.hdfs_client,
+                   configuration_attributes=params.config_attrs['hdfs-client'])
 
+  # Write yarn-client.xml on the local filesystem. If yarn HA is enabled, append related parameters
+  params.XmlConfig(filename="yarn-client.xml",
+                   configurations=params.yarn_client,
+                   configuration_attributes=params.config_attrs['yarn-client'])
 
-def __update_hawq_site():
-  """
-  Sets up hawq-site.xml
-  """
-  import params
-  
-  XmlConfig("hawq-site.xml",
-            conf_dir=hawq_constants.hawq_config_dir,
-            configurations=ConfigDictionary(params.hawq_site),
-            configuration_attributes=params.config['configuration_attributes']['hawq-site'],
-            owner=hawq_constants.hawq_user,
-            group=hawq_constants.hawq_group,
-            mode=0644)
+  # Write hawq-site.xml on the local filesystem.
+  params.XmlConfig(filename="hawq-site.xml",
+                   configurations=params.hawq_site,
+                   configuration_attributes=params.config_attrs['hawq-site'])
+  __set_osparams()
 
 
 def __set_osparams():

+ 1 - 1
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py

@@ -48,7 +48,7 @@ sysctl_conf_dir = "/etc/sysctl.d"
 hawq_slaves_file = os.path.join(hawq_config_dir, "slaves")
 hawq_greenplum_path_file = os.path.join(hawq_home_dir, "greenplum_path.sh")
 hawq_hosts_file = "/tmp/hawq_hosts"
-hawq_check_file = os.path.join(hawq_config_dir, "gpcheck.cnf")
+hawq_check_file = os.path.join(hawq_config_dir, "hawq_check.cnf")
 sysctl_suse_file = "/etc/sysctl.conf"
 sysctl_backup_file = "/etc/sysctl.conf.backup.{0}"
 hawq_sysctl_filename = "hawq_sysctl.conf"

+ 1 - 1
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py

@@ -51,7 +51,7 @@ class HawqMaster(Script):
     from hawqstatus import get_pid_file
     check_process_status(get_pid_file())
 
-  def immediate_stop_cluster(self, env):
+  def immediate_stop_hawq_service(self, env):
     master_helper.stop(hawq_constants.IMMEDIATE, hawq_constants.CLUSTER)
 
 if __name__ == "__main__":

+ 1 - 1
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py

@@ -73,7 +73,7 @@ class HawqSegment(Script):
     from hawqstatus import get_pid_file
     check_process_status(get_pid_file())
 
-  def immediate_stop(self, env):
+  def immediate_stop_hawq_segment(self, env):
     self.stop(env, mode=hawq_constants.IMMEDIATE)
 
 

+ 8 - 2
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py

@@ -18,6 +18,7 @@ limitations under the License.
 """
 from resource_management import Script
 from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.logger import Logger
 
 import master_helper
 import common
@@ -51,10 +52,15 @@ class HawqStandby(Script):
     from hawqstatus import get_pid_file
     check_process_status(get_pid_file())
 
-  def activate_standby(self, env):
+  def activate_hawq_standby(self, env):
     import utils
     utils.exec_hawq_operation(hawq_constants.ACTIVATE, "{0} -a -M {1} -v".format(hawq_constants.STANDBY, hawq_constants.FAST))
-
+  def resync_hawq_standby(self, env):
+    import params
+    import utils
+    Logger.info("Re-synchronizing HAWQ Standby..")
+    utils.exec_hawq_operation(hawq_constants.INIT, "{0} -n -a -v -M {1}".format(hawq_constants.STANDBY, hawq_constants.FAST))
+    Logger.info("HAWQ Standby host {0} Re-Sync successful".format(params.hostname))
 
 if __name__ == "__main__":
     HawqStandby().execute()

+ 1 - 1
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py

@@ -34,7 +34,7 @@ def __setup_master_specific_conf_files():
   """
   import params
 
-  File(hawq_constants.hawq_check_file, content=params.gpcheck_content, owner=hawq_constants.hawq_user, group=hawq_constants.hawq_group,
+  File(hawq_constants.hawq_check_file, content=params.hawq_check_content, owner=hawq_constants.hawq_user, group=hawq_constants.hawq_group,
       mode=0644)
 
   File(hawq_constants.hawq_slaves_file, content=Template("slaves.j2"), owner=hawq_constants.hawq_user, group=hawq_constants.hawq_group,

+ 14 - 7
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/params.py

@@ -16,16 +16,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
-import os
 import functools
-from hawq_constants import PXF_PORT, pxf_hdfs_test_dir
+import hawq_constants
 from resource_management import Script
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.functions import get_kinit_path
 
 config = Script.get_config()
-
+config_attrs = config['configuration_attributes']
 
 def __get_component_host(component):
   """
@@ -76,9 +76,16 @@ HdfsResource = functools.partial(HdfsResource,
                                  default_fs=default_fs)
 
 
+# XMLConfig partial function
+XmlConfig = functools.partial(XmlConfig,
+                              conf_dir=hawq_constants.hawq_config_dir,
+                              owner=hawq_constants.hawq_user,
+                              group=hawq_constants.hawq_group,
+                              mode=0644)
+
 # For service Check
 is_pxf_installed = __get_component_host("pxf_hosts") is not None
-namenode_path =  "{0}:{1}".format(__get_component_host("namenode_host"), PXF_PORT) if dfs_nameservice is None else dfs_nameservice
+namenode_path =  "{0}:{1}".format(__get_component_host("namenode_host"), hawq_constants.PXF_PORT) if dfs_nameservice is None else dfs_nameservice
 table_definition = {
   "HAWQ": {
     "name": "ambari_hawq_test",
@@ -90,13 +97,13 @@ table_definition = {
     "name": "ambari_hawq_pxf_hdfs_readable_test",
     "create_type": "READABLE EXTERNAL",
     "drop_type": "EXTERNAL",
-    "description": "(col1 int) LOCATION ('pxf://{0}{1}?PROFILE=HdfsTextSimple') FORMAT 'TEXT'".format(namenode_path, pxf_hdfs_test_dir)
+    "description": "(col1 int) LOCATION ('pxf://{0}{1}?PROFILE=HdfsTextSimple') FORMAT 'TEXT'".format(namenode_path, hawq_constants.pxf_hdfs_test_dir)
   },
   "EXTERNAL_HDFS_WRITABLE": {
     "name": "ambari_hawq_pxf_hdfs_writable_test",
     "create_type": "WRITABLE EXTERNAL",
     "drop_type": "EXTERNAL",
-    "description": "(col1 int) LOCATION ('pxf://{0}{1}?PROFILE=HdfsTextSimple') FORMAT 'TEXT'".format(namenode_path, pxf_hdfs_test_dir)
+    "description": "(col1 int) LOCATION ('pxf://{0}{1}?PROFILE=HdfsTextSimple') FORMAT 'TEXT'".format(namenode_path, hawq_constants.pxf_hdfs_test_dir)
   }
 }
 
@@ -107,7 +114,7 @@ rm_host = __get_component_host('rm_host')
 yarn_ha_enabled = default('/configurations/yarn-site/yarn.resourcemanager.ha.enabled', False)
 
 # Config files
-gpcheck_content = config['configurations']['gpcheck-env']['content']
+hawq_check_content = config['configurations']['hawq-check-env']['content']
 # database user limits
 hawq_limits = config['configurations']['hawq-limits-env']
 # sysctl parameters
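
The XmlConfig partial introduced above is what lets setup_common_configurations() in common.py pass only the per-file arguments. A minimal illustration of the pattern with functools.partial follows; the literal directory, owner, group, and mode are stand-ins for the hawq_constants values, not taken from this diff.

# Sketch: pre-binding the keyword arguments shared by every HAWQ XML config file,
# so each call only supplies what differs (filename, configurations, attributes).
import functools

def xml_config(filename, configurations, configuration_attributes,
               conf_dir=None, owner=None, group=None, mode=None):
    # Stand-in for resource_management's XmlConfig resource.
    print("{0} -> {1} (owner={2}, group={3}, mode={4:o})".format(
        filename, conf_dir, owner, group, mode))

XmlConfig = functools.partial(xml_config,
                              conf_dir="/usr/local/hawq/etc",  # stand-in for hawq_config_dir
                              owner="gpadmin",                 # stand-in for hawq_user
                              group="hadoop",                  # stand-in for hawq_group
                              mode=0o644)

XmlConfig(filename="hdfs-client.xml", configurations={}, configuration_attributes={})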

+ 3 - 7
ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py

@@ -79,12 +79,8 @@ def exec_ssh_cmd(hostname, cmd):
   """
   Runs the command on the remote host as gpadmin user
   """
-  import params
   # Only gpadmin should be allowed to run command via ssh, thus not exposing user as a parameter
-  if params.hostname != hostname:
-    cmd = "su - {0} -c 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {1} \"{2} \" '".format(hawq_constants.hawq_user, hostname, cmd)
-  else:
-    cmd = "su - {0} -c \"{1}\"".format(hawq_constants.hawq_user, cmd)
+  cmd = "su - {0} -c \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {1} \\\"{2} \\\" \"".format(hawq_constants.hawq_user, hostname, cmd)
   Logger.info("Command executed: {0}".format(cmd))
   process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
   (stdout, stderr) = process.communicate()
@@ -97,9 +93,9 @@ def exec_psql_cmd(command, host, port, db="template1", tuples_only=True):
   """
   src_cmd = "export PGPORT={0} && source {1}".format(port, hawq_constants.hawq_greenplum_path_file)
   if tuples_only:
-    cmd = src_cmd + " && psql -d {0} -c \\\"{1};\\\"".format(db, command)
+    cmd = src_cmd + " && psql -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
   else:
-    cmd = src_cmd + " && psql -t -d {0} -c \\\"{1};\\\"".format(db, command)
+    cmd = src_cmd + " && psql -t -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
   retcode, out, err = exec_ssh_cmd(host, cmd)
   if retcode:
     Logger.error("SQL command executed failed: {0}\nReturncode: {1}\nStdout: {2}\nStderr: {3}".format(cmd, retcode, out, err))

+ 1 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -223,6 +223,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -325,6 +325,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py

@@ -443,6 +443,7 @@ import functools
 HdfsResource = functools.partial(
  HdfsResource,
   user = hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py

@@ -274,6 +274,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py

@@ -343,6 +343,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py

@@ -82,6 +82,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -265,6 +265,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py

@@ -83,6 +83,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py

@@ -64,6 +64,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py

@@ -188,6 +188,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py

@@ -295,6 +295,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py

@@ -87,6 +87,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 0
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py

@@ -268,6 +268,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 1
ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py

@@ -144,7 +144,7 @@ class ExecuteUpgradeTasks(Script):
                             self.logging_level,
                             Script.get_tmp_dir()]
 
-          task.command = " ".join(command_params)
+          task.command = "source /var/lib/ambari-agent/ambari-env.sh ; " + " ".join(command_params)
           # Replace redundant whitespace to make the unit tests easier to validate
           task.command = re.sub("\s+", " ", task.command).strip()
 

+ 47 - 7
ambari-server/src/main/resources/scripts/Ambaripreupload.py

@@ -166,7 +166,8 @@ with Environment() as env:
       hadoop_conf_dir = hadoop_conf_dir,
       principal_name = None,
       hdfs_site = hdfs_site,
-      default_fs = fs_default
+      default_fs = fs_default,
+      hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
     )
    
   def _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed):
@@ -231,7 +232,40 @@ with Environment() as env:
    
     source_and_dest_pairs = [(component_tar_source_file, destination_file), ]
     return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
+  
+  def createHdfsResources():
+    params.HdfsResource('/atshistory', user='hdfs', change_permissions_for_parents=True, owner='yarn', group='hadoop', type='directory', action= ['create_on_execute'], mode=0755)
+    params.HdfsResource('/user/hcat', owner='hcat', type='directory', action=['create_on_execute'], mode=0755)
+    params.HdfsResource('/hive/warehouse', owner='hive', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource('/user/hive', owner='hive', type='directory', action=['create_on_execute'], mode=0755)
+    params.HdfsResource('/tmp', mode=0777, action=['create_on_execute'], type='directory', owner='hdfs')
+    params.HdfsResource('/user/ambari-qa', type='directory', action=['create_on_execute'], mode=0770)
+    params.HdfsResource('/user/oozie', owner='oozie', type='directory', action=['create_on_execute'], mode=0775)
+    params.HdfsResource('/app-logs', recursive_chmod=True, owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource('/tmp/entity-file-history/active', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'])
+    params.HdfsResource('/mapred', owner='mapred', type='directory', action=['create_on_execute'])
+    params.HdfsResource('/mapred/system', owner='hdfs', type='directory', action=['create_on_execute'])
+    params.HdfsResource('/mr-history/done', change_permissions_for_parents=True, owner='mapred', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
+    params.HdfsResource('/atshistory/done', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0700)
+    params.HdfsResource('/atshistory/active', owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=01777)
+    params.HdfsResource('/ams/hbase', owner='ams', type='directory', action=['create_on_execute'], mode=0775)
+    params.HdfsResource('/amshbase/staging', owner='ams', type='directory', action=['create_on_execute'], mode=0711)
+    params.HdfsResource('/user/ams/hbase', owner='ams', type='directory', action=['create_on_execute'], mode=0775)
+
 
+  def putCreatedHdfsResourcesToIgnore(env):
+    if not 'hdfs_files' in env.config:
+      Logger.info("Not creating .hdfs_resource_ignore as no resources to use.")
+      return
+    
+    file_content = ""
+    for file in env.config['hdfs_files']:
+      file_content += file['target']
+      file_content += "\n"
+      
+    with open("/var/lib/ambari-agent/data/.hdfs_resource_ignore", "a+") as fp:
+      fp.write(file_content)
+      
   env.set_params(params)
   hadoop_conf_dir = params.hadoop_conf_dir
    
@@ -272,7 +306,7 @@ with Environment() as env:
   # DON'T CHANGE THE VALUE SINCE IT'S USED TO DETERMINE WHETHER TO RUN THE COMMAND OR NOT BY READING THE MARKER FILE.
   # Oozie tmp dir should be /var/tmp/oozie and is already created by a function above.
   command = format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure} ")
-  command_to_file = format("cd {oozie_tmp_dir} && {oozie_setup_sh_current} prepare-war {oozie_secure} ")
+  command_to_file = format("cd {oozie_tmp_dir} && {oozie_setup_sh_current} prepare-war {oozie_secure} ").strip()
 
   run_prepare_war = False
   if os.path.exists(prepare_war_cmd_file):
@@ -338,7 +372,9 @@ with Environment() as env:
   copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-
+  print "Creating hdfs directories..."
+  createHdfsResources()
+  putCreatedHdfsResourcesToIgnore(env)
   
   # jar shouldn't be used before (read comment below)
   File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
@@ -347,8 +383,12 @@ with Environment() as env:
   )
   # Create everything in one jar call (this is fast).
   # (! Before everything should be executed with action="create_on_execute/delete_on_execute" for this time-optimization to work)
-  params.HdfsResource(None, 
-               logoutput=True,
-               action="execute"
-  )
+  try:
+    params.HdfsResource(None, 
+                 logoutput=True,
+                 action="execute"
+    )
+  except:
+    os.remove("/var/lib/ambari-agent/data/.hdfs_resource_ignore")
+    raise
   print "Completed tarball copy. Ambari preupload script completed."

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -240,6 +240,7 @@ import functools
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml

@@ -526,9 +526,8 @@
       </execute-stage>
 
       <!-- KAFKA  -->
-
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka">
-        <task xsi:type="configure" id ="hdp_2_3_0_0_kafka_broker_listeners"/>
+        <task xsi:type="configure" id ="hdp_2_3_0_0_kafka_broker_deprecate_port"/>
       </execute-stage>
 
     </group>

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.4.xml

@@ -564,9 +564,8 @@
       </execute-stage>
 
       <!-- KAFKA  -->
-
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka">
-        <task xsi:type="configure" id ="hdp_2_4_0_0_kafka_broker_listeners"/>
+        <task xsi:type="configure" id ="hdp_2_4_0_0_kafka_broker_deprecate_port"/>
       </execute-stage>
 
     </group>

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml

@@ -811,6 +811,8 @@
             <script>scripts/kafka_broker.py</script>
             <function>stop</function>
           </task>
+
+          <task xsi:type="configure" id="hdp_2_3_0_0_kafka_broker_deprecate_port"/>
         </pre-upgrade>
 
         <upgrade>

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml

@@ -824,6 +824,8 @@
             <script>scripts/kafka_broker.py</script>
             <function>stop</function>
           </task>
+
+          <task xsi:type="configure" id="hdp_2_4_0_0_kafka_broker_deprecate_port"/>
         </pre-upgrade>
 
         <upgrade>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/kafka-broker.xml

@@ -23,7 +23,7 @@
     <name>listeners</name>
     <value>PLAINTEXT://localhost:6667</value>
     <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <description>host and port where kafka broker will be accepting connnections. localhost will be subsituted with hostname.</description>
+    <description>host and port where kafka broker will be accepting connections. localhost will be substituted with hostname.</description>
   </property>
   <property>
     <name>controlled.shutdown.enable</name>

+ 19 - 1
ambari-server/src/main/resources/stacks/HDP/2.3/services/RANGER/themes/theme_version_2.json

@@ -940,7 +940,25 @@
         },
         {
           "config": "admin-properties/audit_db_password",
-          "subsection-name": "subsection-ranger-audit-db-row2-col2"
+          "subsection-name": "subsection-ranger-audit-db-row2-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.db"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.db}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
         },
         {
           "config": "ranger-env/xasecure.audit.destination.solr",

+ 3 - 2
ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml

@@ -893,9 +893,10 @@
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <changes>
-          <definition xsi:type="configure" id="hdp_2_3_0_0_kafka_broker_listeners">
+          <definition xsi:type="configure" id="hdp_2_3_0_0_kafka_broker_deprecate_port">
             <type>kafka-broker</type>
-            <set key="listeners" value="PLAINTEXT://localhost:6667"/>
+            <!-- Deprecate "port" property since "listeners" will be added. -->
+            <transfer operation="delete" delete-key="port"/>
           </definition>
         </changes>
       </component>

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml

@@ -316,9 +316,8 @@
       </execute-stage>
 
       <!-- KAFKA  -->
-      <!--TODO: remove? Used for non-rolling upgrade only-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka">
-        <task xsi:type="configure" id ="hdp_2_4_0_0_kafka_broker_listeners"/>
+        <task xsi:type="configure" id ="hdp_2_4_0_0_kafka_broker_deprecate_port"/>
       </execute-stage>
 
     </group>

+ 4 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml

@@ -740,6 +740,10 @@
 
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
+        <pre-upgrade>
+          <task xsi:type="configure" id ="hdp_2_4_0_0_kafka_broker_deprecate_port"/>
+        </pre-upgrade>
+        
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

+ 3 - 2
ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml

@@ -157,9 +157,10 @@
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <changes>
-          <definition xsi:type="configure" id="hdp_2_4_0_0_kafka_broker_listeners">
+          <definition xsi:type="configure" id="hdp_2_4_0_0_kafka_broker_deprecate_port">
             <type>kafka-broker</type>
-            <set key="listeners" value="PLAINTEXT://localhost:6667"/>
+            <!-- Deprecate "port" property since "listeners" will be added. -->
+            <transfer operation="delete" delete-key="port"/>
           </definition>
         </changes>
       </component>

+ 25 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/metainfo.xml

@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>2.4</extends>
+  <minJdk>1.7</minJdk>
+  <maxJdk>1.8</maxJdk>
+</metainfo>

+ 92 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/repos/repoinfo.xml

@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos7</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="debian7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian7/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/2.x/updates/2.5.0.0</baseurl>
+      <repoid>HDP-2.5</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/ACCUMULO/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ACCUMULO</name>
+      <version>1.7.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ATLAS</name>
+      <version>0.5.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/FALCON/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <version>0.6.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/FLUME/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.5.2.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <version>1.1.2.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>1.2.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.9.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 25 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/KERBEROS/metainfo.xml

@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <version>0.6.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/MAHOUT/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAHOUT</name>
+      <version>0.9.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/OOZIE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <version>4.2.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/PIG/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <version>0.15.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER</name>
+      <version>0.5.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER_KMS</name>
+      <version>0.5.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/SLIDER/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <version>0.80.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK</name>
+      <version>1.6.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/SQOOP/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <version>1.4.6.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>0.10.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/TEZ/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TEZ</name>
+      <version>0.7.0.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.1.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/ZOOKEEPER/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6.2.5</version>
+    </service>
+  </services>
+</metainfo>

+ 22 - 0
ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py

@@ -0,0 +1,22 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class HDP25StackAdvisor(HDP24StackAdvisor):
+  pass

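Note: the new HDP 2.5 stack advisor above is an empty subclass, so it inherits every recommendation and validation from HDP24StackAdvisor (supplied by Ambari's stack-advisor loading chain, which is why no import appears). As a rough, hypothetical sketch only — not part of this commit, and recommendKAFKAConfigurations is an assumed helper name — a later change would typically override one hook and delegate the rest to the parent:

    class HDP25StackAdvisor(HDP24StackAdvisor):
      def getServiceConfigurationRecommenderDict(self):
        # Start from the parent's recommender map and override only the entries HDP 2.5 changes.
        parentRecommendConfDict = super(HDP25StackAdvisor, self).getServiceConfigurationRecommenderDict()
        childRecommendConfDict = {
          "KAFKA": self.recommendKAFKAConfigurations  # assumed helper, defined elsewhere
        }
        parentRecommendConfDict.update(childRecommendConfDict)
        return parentRecommendConfDict
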
+ 109 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/services/GroupPrivilegeServiceTest.java

@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Unit tests for GroupPrivilegeService.
+ */
+public class GroupPrivilegeServiceTest extends BaseServiceTest {
+
+  public List<ServiceTestInvocation> getTestInvocations() throws Exception {
+    List<ServiceTestInvocation> listInvocations = new ArrayList<ServiceTestInvocation>();
+
+    GroupPrivilegeService groupPrivilegeService;
+    Method m;
+    Object[] args;
+
+    //getPrivilege
+    groupPrivilegeService = new TestGroupPrivilegeService();
+    m = groupPrivilegeService.getClass().getMethod("getPrivilege", HttpHeaders.class, UriInfo.class, String.class);
+    args = new Object[] {getHttpHeaders(), getUriInfo(), "id"};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, groupPrivilegeService, m, args, null));
+
+    //getPrivileges
+    groupPrivilegeService = new TestGroupPrivilegeService();
+    m = groupPrivilegeService.getClass().getMethod("getPrivileges", HttpHeaders.class, UriInfo.class);
+    args = new Object[] {getHttpHeaders(), getUriInfo()};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, groupPrivilegeService, m, args, null));
+
+    return listInvocations;
+  }
+
+  @Test
+  public void testDisabledMethods() {
+    final HttpHeaders headers = EasyMock.createNiceMock(HttpHeaders.class);
+    final UriInfo uriInfo = EasyMock.createNiceMock(UriInfo.class);
+    final GroupPrivilegeService service = new TestGroupPrivilegeService();
+
+    final List<Response> disabledMethods = new ArrayList<Response>();
+    disabledMethods.add(service.createPrivilege("test", headers, uriInfo));
+    disabledMethods.add(service.updatePrivilege("test", headers, uriInfo, "test"));
+    disabledMethods.add(service.updatePrivileges("test", headers, uriInfo));
+    disabledMethods.add(service.deletePrivilege(headers, uriInfo, "test"));
+    disabledMethods.add(service.deletePrivileges("test", headers, uriInfo));
+
+    for (Response response: disabledMethods) {
+      Assert.assertEquals(HttpServletResponse.SC_NOT_IMPLEMENTED, response.getStatus());
+    }
+  }
+
+  private class TestGroupPrivilegeService extends GroupPrivilegeService {
+
+    public TestGroupPrivilegeService() {
+      super("group");
+    }
+
+    @Override
+    protected ResourceInstance createResource(Type type, Map<Type, String> mapIds) {
+      return getTestResource();
+    }
+
+    @Override
+    RequestFactory getRequestFactory() {
+      return getTestRequestFactory();
+    }
+
+    @Override
+    protected RequestBodyParser getBodyParser() {
+      return getTestBodyParser();
+    }
+
+    @Override
+    protected ResultSerializer getResultSerializer() {
+      return getTestResultSerializer();
+    }
+  }
+}

+ 17 - 12
ambari-server/src/test/java/org/apache/ambari/server/checks/CheckDatabaseHelperTest.java

@@ -260,22 +260,27 @@ public class CheckDatabaseHelperTest {
     expect(stackResultSet.getString("stack_version")).andReturn("2.2");
     expect(mockDBDbAccessor.getConnection()).andReturn(mockConnection);
     expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
-    expect(mockStatement.executeQuery("select service_name from clusterservices where service_name not in (select service_name from serviceconfig where group_id is null)")).andReturn(mockResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name, service_name from clusterservices cs " +
+            "join clusters c on cs.cluster_id=c.cluster_id " +
+            "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)")).andReturn(mockResultSet);
     expect(mockStatement.executeQuery("select service_name from serviceconfig where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null")).andReturn(mockResultSet);
-    expect(mockStatement.executeQuery("select s.stack_name, s.stack_version from clusters c join stack s on c.desired_stack_id = s.stack_id")).andReturn(stackResultSet);
-    expect(mockStatement.executeQuery("select cs.service_name, type_name, sc.version from clusterservices cs " +
-            "join serviceconfig sc on cs.service_name=sc.service_name " +
+    expect(mockStatement.executeQuery("select c.cluster_name, s.stack_name, s.stack_version from clusters c " +
+            "join stack s on c.desired_stack_id = s.stack_id")).andReturn(stackResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name, type_name, sc.version from clusterservices cs " +
+            "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
-            "join clusterconfig cc on scm.config_id=cc.config_id " +
+            "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " +
+            "join clusters c on cc.cluster_id=c.cluster_id " +
             "where sc.group_id is null " +
-            "group by cs.service_name, type_name, sc.version")).andReturn(serviceConfigResultSet);
-    expect(mockStatement.executeQuery("select cs.service_name,cc.type_name from clusterservices cs " +
-            "join serviceconfig sc on cs.service_name=sc.service_name " +
+            "group by c.cluster_name, cs.service_name, type_name, sc.version")).andReturn(serviceConfigResultSet);
+    expect(mockStatement.executeQuery("select c.cluster_name, cs.service_name,cc.type_name from clusterservices cs " +
+            "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " +
             "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " +
-            "join clusterconfig cc on scm.config_id=cc.config_id " +
-            "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag " +
-            "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name) " +
-            "group by cs.service_name,cc.type_name " +
+            "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " +
+            "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " +
+            "join clusters c on ccm.cluster_id=c.cluster_id " +
+            "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " +
+            "group by c.cluster_name,cs.service_name,cc.type_name " +
             "having sum(ccm.selected) < 1")).andReturn(mockResultSet);
 
     CheckDatabaseHelper checkDatabaseHelper = new CheckDatabaseHelper(mockDBDbAccessor, mockInjector, null);

+ 362 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/GroupPrivilegeResourceProviderTest.java

@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.GroupDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.MemberEntity;
+import org.apache.ambari.server.orm.entities.PermissionEntity;
+import org.apache.ambari.server.orm.entities.PrincipalEntity;
+import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
+import org.apache.ambari.server.orm.entities.PrivilegeEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
+import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.GroupEntity;
+import org.apache.ambari.server.orm.entities.ViewEntity;
+import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
+import org.apache.ambari.server.security.TestAuthenticationFactory;
+import org.apache.ambari.server.security.authorization.AuthorizationException;
+import org.apache.ambari.server.security.authorization.ResourceType;
+import org.easymock.EasyMockSupport;
+import org.junit.Test;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.context.SecurityContextHolder;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.expect;
+
+/**
+ * GroupPrivilegeResourceProvider tests.
+ */
+public class GroupPrivilegeResourceProviderTest extends EasyMockSupport {
+
+  @Test(expected = SystemException.class)
+  public void testCreateResources() throws Exception {
+    SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createClusterAdministrator("user1", 2L));
+    GroupPrivilegeResourceProvider resourceProvider = new GroupPrivilegeResourceProvider();
+    resourceProvider.createResources(createNiceMock(Request.class));
+  }
+
+  @Test
+  public void testGetResources_Administrator() throws Exception {
+    getResourcesTest(TestAuthenticationFactory.createAdministrator("admin"), "Group1");
+  }
+
+  @Test(expected = AuthorizationException.class)
+  public void testGetResources_NonAdministrator() throws Exception {
+    getResourcesTest(TestAuthenticationFactory.createClusterAdministrator("user1", 2L), "Group1");
+  }
+  
+  @Test(expected = SystemException.class)
+  public void testUpdateResources() throws Exception {
+    SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createClusterAdministrator("user1", 2L));
+    GroupPrivilegeResourceProvider resourceProvider = new GroupPrivilegeResourceProvider();
+    resourceProvider.updateResources(createNiceMock(Request.class), createNiceMock(Predicate.class));
+  }
+
+  @Test(expected = SystemException.class)
+  public void testDeleteResources() throws Exception {
+    SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createClusterAdministrator("user1", 2L));
+    GroupPrivilegeResourceProvider resourceProvider = new GroupPrivilegeResourceProvider();
+    resourceProvider.deleteResources(createNiceMock(Predicate.class));
+  }
+
+  @Test
+  public void testToResource_AMBARI() {
+    PermissionEntity permissionEntity = createMock(PermissionEntity.class);
+    expect(permissionEntity.getPermissionName()).andReturn("ADMINISTRATOR").atLeastOnce();
+    expect(permissionEntity.getPermissionLabel()).andReturn("Administrator").atLeastOnce();
+
+    PrincipalTypeEntity principalTypeEntity = createMock(PrincipalTypeEntity.class);
+    expect(principalTypeEntity.getName()).andReturn("GROUP").atLeastOnce();
+
+    PrincipalEntity principalEntity = createMock(PrincipalEntity.class);
+    expect(principalEntity.getPrincipalType()).andReturn(principalTypeEntity).atLeastOnce();
+
+    ResourceTypeEntity resourceTypeEntity = createMock(ResourceTypeEntity.class);
+    expect(resourceTypeEntity.getName()).andReturn("AMBARI").atLeastOnce();
+
+    ResourceEntity resourceEntity = createMock(ResourceEntity.class);
+    expect(resourceEntity.getResourceType()).andReturn(resourceTypeEntity).atLeastOnce();
+
+    PrivilegeEntity privilegeEntity = createMock(PrivilegeEntity.class);
+    expect(privilegeEntity.getId()).andReturn(1).atLeastOnce();
+    expect(privilegeEntity.getPermission()).andReturn(permissionEntity).atLeastOnce();
+    expect(privilegeEntity.getPrincipal()).andReturn(principalEntity).atLeastOnce();
+    expect(privilegeEntity.getResource()).andReturn(resourceEntity).atLeastOnce();
+
+    GroupEntity groupEntity = createMock(GroupEntity.class);
+    expect(groupEntity.getGroupName()).andReturn("group1").atLeastOnce();
+
+    GroupDAO groupDAO = createMock(GroupDAO.class);
+    expect(groupDAO.findGroupByPrincipal(anyObject(PrincipalEntity.class))).andReturn(groupEntity).anyTimes();
+
+    ClusterDAO clusterDAO = createMock(ClusterDAO.class);
+    ViewInstanceDAO viewInstanceDAO = createMock(ViewInstanceDAO.class);
+
+    replayAll();
+
+    GroupPrivilegeResourceProvider.init(clusterDAO, groupDAO, viewInstanceDAO);
+    GroupPrivilegeResourceProvider provider = new GroupPrivilegeResourceProvider();
+    Resource resource = provider.toResource(privilegeEntity, "group1", provider.getPropertyIds());
+
+    Assert.assertEquals(ResourceType.AMBARI.name(), resource.getPropertyValue(GroupPrivilegeResourceProvider.PRIVILEGE_TYPE_PROPERTY_ID));
+
+    verifyAll();
+  }
+
+  @Test
+  public void testToResource_CLUSTER() {
+    PermissionEntity permissionEntity = createMock(PermissionEntity.class);
+    expect(permissionEntity.getPermissionName()).andReturn("CLUSTER.ADMINISTRATOR").atLeastOnce();
+    expect(permissionEntity.getPermissionLabel()).andReturn("Cluster Administrator").atLeastOnce();
+
+    PrincipalTypeEntity principalTypeEntity = createMock(PrincipalTypeEntity.class);
+    expect(principalTypeEntity.getName()).andReturn("GROUP").atLeastOnce();
+
+    PrincipalEntity principalEntity = createMock(PrincipalEntity.class);
+    expect(principalEntity.getPrincipalType()).andReturn(principalTypeEntity).atLeastOnce();
+
+    ClusterEntity clusterEntity = createMock(ClusterEntity.class);
+    expect(clusterEntity.getClusterName()).andReturn("TestCluster").atLeastOnce();
+
+    ResourceTypeEntity resourceTypeEntity = createMock(ResourceTypeEntity.class);
+    expect(resourceTypeEntity.getName()).andReturn("CLUSTER").atLeastOnce();
+
+    ResourceEntity resourceEntity = createMock(ResourceEntity.class);
+    expect(resourceEntity.getId()).andReturn(1L).atLeastOnce();
+    expect(resourceEntity.getResourceType()).andReturn(resourceTypeEntity).atLeastOnce();
+
+    PrivilegeEntity privilegeEntity = createMock(PrivilegeEntity.class);
+    expect(privilegeEntity.getId()).andReturn(1).atLeastOnce();
+    expect(privilegeEntity.getPermission()).andReturn(permissionEntity).atLeastOnce();
+    expect(privilegeEntity.getPrincipal()).andReturn(principalEntity).atLeastOnce();
+    expect(privilegeEntity.getResource()).andReturn(resourceEntity).atLeastOnce();
+
+    GroupEntity groupEntity = createMock(GroupEntity.class);
+    expect(groupEntity.getGroupName()).andReturn("group1").atLeastOnce();
+
+    ClusterDAO clusterDAO = createMock(ClusterDAO.class);
+    expect(clusterDAO.findByResourceId(1L)).andReturn(clusterEntity).atLeastOnce();
+
+    ViewInstanceDAO viewInstanceDAO = createMock(ViewInstanceDAO.class);
+
+    GroupDAO groupDAO = createMock(GroupDAO.class);
+    expect(groupDAO.findGroupByPrincipal(anyObject(PrincipalEntity.class))).andReturn(groupEntity).anyTimes();
+
+    replayAll();
+
+    GroupPrivilegeResourceProvider.init(clusterDAO, groupDAO, viewInstanceDAO);
+    GroupPrivilegeResourceProvider provider = new GroupPrivilegeResourceProvider();
+    Resource resource = provider.toResource(privilegeEntity, "group1", provider.getPropertyIds());
+
+    Assert.assertEquals("TestCluster", resource.getPropertyValue(ClusterPrivilegeResourceProvider.PRIVILEGE_CLUSTER_NAME_PROPERTY_ID));
+    Assert.assertEquals(ResourceType.CLUSTER.name(), resource.getPropertyValue(GroupPrivilegeResourceProvider.PRIVILEGE_TYPE_PROPERTY_ID));
+
+    verifyAll();
+  }
+
+  @Test
+  public void testToResource_VIEW() {
+    PermissionEntity permissionEntity = createMock(PermissionEntity.class);
+    expect(permissionEntity.getPermissionName()).andReturn("CLUSTER.ADMINISTRATOR").atLeastOnce();
+    expect(permissionEntity.getPermissionLabel()).andReturn("Cluster Administrator").atLeastOnce();
+
+    PrincipalTypeEntity principalTypeEntity = createMock(PrincipalTypeEntity.class);
+    expect(principalTypeEntity.getName()).andReturn("GROUP").atLeastOnce();
+
+    PrincipalEntity principalEntity = createMock(PrincipalEntity.class);
+    expect(principalEntity.getPrincipalType()).andReturn(principalTypeEntity).atLeastOnce();
+
+    ViewEntity viewEntity = createMock(ViewEntity.class);
+    expect(viewEntity.getCommonName()).andReturn("TestView").atLeastOnce();
+    expect(viewEntity.getVersion()).andReturn("1.2.3.4").atLeastOnce();
+
+    ViewInstanceEntity viewInstanceEntity = createMock(ViewInstanceEntity.class);
+    expect(viewInstanceEntity.getViewEntity()).andReturn(viewEntity).atLeastOnce();
+    expect(viewInstanceEntity.getName()).andReturn("Test View").atLeastOnce();
+
+    ResourceTypeEntity resourceTypeEntity = createMock(ResourceTypeEntity.class);
+    expect(resourceTypeEntity.getName()).andReturn("VIEW").atLeastOnce();
+
+    ResourceEntity resourceEntity = createMock(ResourceEntity.class);
+    expect(resourceEntity.getId()).andReturn(1L).atLeastOnce();
+    expect(resourceEntity.getResourceType()).andReturn(resourceTypeEntity).atLeastOnce();
+
+    PrivilegeEntity privilegeEntity = createMock(PrivilegeEntity.class);
+    expect(privilegeEntity.getId()).andReturn(1).atLeastOnce();
+    expect(privilegeEntity.getPermission()).andReturn(permissionEntity).atLeastOnce();
+    expect(privilegeEntity.getPrincipal()).andReturn(principalEntity).atLeastOnce();
+    expect(privilegeEntity.getResource()).andReturn(resourceEntity).atLeastOnce();
+
+    GroupEntity groupEntity = createMock(GroupEntity.class);
+    expect(groupEntity.getGroupName()).andReturn("group1").atLeastOnce();
+
+    ClusterDAO clusterDAO = createMock(ClusterDAO.class);
+    
+    ViewInstanceDAO viewInstanceDAO = createMock(ViewInstanceDAO.class);
+    expect(viewInstanceDAO.findByResourceId(1L)).andReturn(viewInstanceEntity).atLeastOnce();
+
+    GroupDAO groupDAO = createMock(GroupDAO.class);
+    expect(groupDAO.findGroupByPrincipal(anyObject(PrincipalEntity.class))).andReturn(groupEntity).anyTimes();
+
+    replayAll();
+
+    GroupPrivilegeResourceProvider.init(clusterDAO, groupDAO, viewInstanceDAO);
+    GroupPrivilegeResourceProvider provider = new GroupPrivilegeResourceProvider();
+    Resource resource = provider.toResource(privilegeEntity, "group1", provider.getPropertyIds());
+
+    Assert.assertEquals("Test View", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_INSTANCE_NAME_PROPERTY_ID));
+    Assert.assertEquals("TestView", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_NAME_PROPERTY_ID));
+    Assert.assertEquals("1.2.3.4", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_VERSION_PROPERTY_ID));
+    Assert.assertEquals(ResourceType.VIEW.name(), resource.getPropertyValue(GroupPrivilegeResourceProvider.PRIVILEGE_TYPE_PROPERTY_ID));
+
+    verifyAll();
+  }
+
+  @Test
+  public void testToResource_SpecificVIEW() {
+    PermissionEntity permissionEntity = createMock(PermissionEntity.class);
+    expect(permissionEntity.getPermissionName()).andReturn("CLUSTER.ADMINISTRATOR").atLeastOnce();
+    expect(permissionEntity.getPermissionLabel()).andReturn("Cluster Administrator").atLeastOnce();
+
+    PrincipalTypeEntity principalTypeEntity = createMock(PrincipalTypeEntity.class);
+    expect(principalTypeEntity.getName()).andReturn("GROUP").atLeastOnce();
+
+    PrincipalEntity principalEntity = createMock(PrincipalEntity.class);
+    expect(principalEntity.getPrincipalType()).andReturn(principalTypeEntity).atLeastOnce();
+
+    ViewEntity viewEntity = createMock(ViewEntity.class);
+    expect(viewEntity.getCommonName()).andReturn("TestView").atLeastOnce();
+    expect(viewEntity.getVersion()).andReturn("1.2.3.4").atLeastOnce();
+
+    ViewInstanceEntity viewInstanceEntity = createMock(ViewInstanceEntity.class);
+    expect(viewInstanceEntity.getViewEntity()).andReturn(viewEntity).atLeastOnce();
+    expect(viewInstanceEntity.getName()).andReturn("Test View").atLeastOnce();
+
+    ResourceTypeEntity resourceTypeEntity = createMock(ResourceTypeEntity.class);
+    expect(resourceTypeEntity.getName()).andReturn("TestView{1.2.3.4}").atLeastOnce();
+
+    ResourceEntity resourceEntity = createMock(ResourceEntity.class);
+    expect(resourceEntity.getId()).andReturn(1L).atLeastOnce();
+    expect(resourceEntity.getResourceType()).andReturn(resourceTypeEntity).atLeastOnce();
+
+    PrivilegeEntity privilegeEntity = createMock(PrivilegeEntity.class);
+    expect(privilegeEntity.getId()).andReturn(1).atLeastOnce();
+    expect(privilegeEntity.getPermission()).andReturn(permissionEntity).atLeastOnce();
+    expect(privilegeEntity.getPrincipal()).andReturn(principalEntity).atLeastOnce();
+    expect(privilegeEntity.getResource()).andReturn(resourceEntity).atLeastOnce();
+
+    GroupEntity groupEntity = createMock(GroupEntity.class);
+    expect(groupEntity.getGroupName()).andReturn("group1").atLeastOnce();
+
+    ClusterDAO clusterDAO = createMock(ClusterDAO.class);
+
+    ViewInstanceDAO viewInstanceDAO = createMock(ViewInstanceDAO.class);
+    expect(viewInstanceDAO.findByResourceId(1L)).andReturn(viewInstanceEntity).atLeastOnce();
+
+    GroupDAO groupDAO = createMock(GroupDAO.class);
+    expect(groupDAO.findGroupByPrincipal(anyObject(PrincipalEntity.class))).andReturn(groupEntity).anyTimes();
+
+    replayAll();
+
+    GroupPrivilegeResourceProvider.init(clusterDAO, groupDAO, viewInstanceDAO);
+    GroupPrivilegeResourceProvider provider = new GroupPrivilegeResourceProvider();
+    Resource resource = provider.toResource(privilegeEntity, "group1", provider.getPropertyIds());
+
+    Assert.assertEquals("Test View", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_INSTANCE_NAME_PROPERTY_ID));
+    Assert.assertEquals("TestView", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_NAME_PROPERTY_ID));
+    Assert.assertEquals("1.2.3.4", resource.getPropertyValue(ViewPrivilegeResourceProvider.PRIVILEGE_VIEW_VERSION_PROPERTY_ID));
+    Assert.assertEquals(ResourceType.VIEW.name(), resource.getPropertyValue(GroupPrivilegeResourceProvider.PRIVILEGE_TYPE_PROPERTY_ID));
+
+    verifyAll();
+  }
+
+  private void getResourcesTest(Authentication authentication, String requestedGroupName) throws Exception {
+    final GroupPrivilegeResourceProvider resourceProvider = new GroupPrivilegeResourceProvider();
+    final GroupDAO groupDAO = createNiceMock(GroupDAO.class);
+    final ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class);
+    final ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
+    final GroupEntity groupEntity = createNiceMock(GroupEntity.class);
+    final PrincipalEntity principalEntity = createNiceMock(PrincipalEntity.class);
+    final PrivilegeEntity privilegeEntity = createNiceMock(PrivilegeEntity.class);
+    final PermissionEntity permissionEntity = createNiceMock(PermissionEntity.class);
+    final PrincipalTypeEntity principalTypeEntity = createNiceMock(PrincipalTypeEntity.class);
+    final ResourceEntity resourceEntity = createNiceMock(ResourceEntity.class);
+    final ResourceTypeEntity resourceTypeEntity = createNiceMock(ResourceTypeEntity.class);
+
+    expect(groupDAO.findGroupByName(requestedGroupName)).andReturn(groupEntity).anyTimes();
+    expect(groupEntity.getPrincipal()).andReturn(principalEntity).anyTimes();
+    expect(groupEntity.getMemberEntities()).andReturn(Collections.<MemberEntity>emptySet()).anyTimes();
+    expect(privilegeEntity.getPermission()).andReturn(permissionEntity).anyTimes();
+    expect(privilegeEntity.getPrincipal()).andReturn(principalEntity).anyTimes();
+    expect(principalEntity.getPrincipalType()).andReturn(principalTypeEntity).anyTimes();
+    expect(principalTypeEntity.getName()).andReturn(PrincipalTypeEntity.GROUP_PRINCIPAL_TYPE_NAME).anyTimes();
+    expect(principalEntity.getPrivileges()).andReturn(new HashSet<PrivilegeEntity>() {
+      {
+        add(privilegeEntity);
+      }
+    }).anyTimes();
+    expect(groupDAO.findGroupByPrincipal(anyObject(PrincipalEntity.class))).andReturn(groupEntity).anyTimes();
+    expect(groupEntity.getGroupName()).andReturn(requestedGroupName).anyTimes();
+    expect(privilegeEntity.getResource()).andReturn(resourceEntity).anyTimes();
+    expect(resourceEntity.getResourceType()).andReturn(resourceTypeEntity).anyTimes();
+    expect(resourceTypeEntity.getName()).andReturn(ResourceType.AMBARI.name());
+
+    replayAll();
+
+    GroupPrivilegeResourceProvider.init(clusterDAO, groupDAO, viewInstanceDAO);
+
+    final Set<String> propertyIds = new HashSet<String>();
+    propertyIds.add(GroupPrivilegeResourceProvider.PRIVILEGE_GROUP_NAME_PROPERTY_ID);
+
+    final Predicate predicate = new PredicateBuilder()
+        .property(GroupPrivilegeResourceProvider.PRIVILEGE_GROUP_NAME_PROPERTY_ID)
+        .equals(requestedGroupName)
+        .toPredicate();
+    Request request = PropertyHelper.getReadRequest(propertyIds);
+
+    // Set the authenticated user to an administrator
+    SecurityContextHolder.getContext().setAuthentication(authentication);
+
+    Set<Resource> resources = resourceProvider.getResources(request, predicate);
+
+    Assert.assertEquals(1, resources.size());
+    for (Resource resource : resources) {
+      String groupName = (String) resource.getPropertyValue(GroupPrivilegeResourceProvider.PRIVILEGE_GROUP_NAME_PROPERTY_ID);
+      Assert.assertEquals(requestedGroupName, groupName);
+    }
+
+    verifyAll();
+  }
+
+}

+ 66 - 26
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java

@@ -19,91 +19,115 @@
 package org.apache.ambari.server.upgrade;
 
 
+import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+import com.google.inject.Module;
 import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
 import junit.framework.Assert;
+import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import javax.persistence.EntityManager;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
 
 public class UpgradeCatalog240Test {
-  private Injector injector;
+  private static Injector injector;
   private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
   private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
 
 
+  @BeforeClass
+  public static void classSetUp() {
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    injector.getInstance(GuiceJpaInitializer.class);
+  }
 
   @Before
   public void init() {
     reset(entityManagerProvider);
     expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
     replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
 
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
+    injector.getInstance(UpgradeCatalogHelper.class);
     // inject AmbariMetaInfo to ensure that stacks get populated in the DB
     injector.getInstance(AmbariMetaInfo.class);
     // load the stack entity
     StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
+    stackDAO.find("HDP", "2.2.0");
   }
 
   @After
   public void tearDown() {
-    injector.getInstance(PersistService.class).stop();
   }
 
   @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    UpgradeCatalog240 upgradeCatalog240 = injector.getInstance(UpgradeCatalog240.class);
-
+  public void testExecuteDDLUpdates() throws SQLException, AmbariException {
     Capture<DBAccessor.DBColumnInfo> capturedColumnInfo = newCapture();
+    final DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
+    Configuration configuration = createNiceMock(Configuration.class);
+    Connection connection = createNiceMock(Connection.class);
+    Statement statement = createNiceMock(Statement.class);
+    ResultSet resultSet = createNiceMock(ResultSet.class);
+    Capture<List<DBAccessor.DBColumnInfo>> capturedSettingColumns = EasyMock.newCapture();
 
-    DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
     dbAccessor.addColumn(eq("adminpermission"), capture(capturedColumnInfo));
-    expectLastCall().once();
-
-    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
-    field.set(upgradeCatalog240, dbAccessor);
+    dbAccessor.createTable(eq("setting"), capture(capturedSettingColumns), eq("id"));
+    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
+    expect(dbAccessor.getConnection()).andReturn(connection);
+    expect(connection.createStatement()).andReturn(statement);
+    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet);
 
     replay(dbAccessor);
-
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(EntityManager.class).toInstance(entityManager);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog240 upgradeCatalog240 = injector.getInstance(UpgradeCatalog240.class);
     upgradeCatalog240.executeDDLUpdates();
 
-    verify(dbAccessor);
-
     DBAccessor.DBColumnInfo columnInfo = capturedColumnInfo.getValue();
     Assert.assertNotNull(columnInfo);
     Assert.assertEquals(UpgradeCatalog240.SORT_ORDER_COL, columnInfo.getName());
@@ -111,12 +135,29 @@ public class UpgradeCatalog240Test {
     Assert.assertEquals(Short.class, columnInfo.getType());
     Assert.assertEquals(1, columnInfo.getDefaultValue());
     Assert.assertEquals(false, columnInfo.isNullable());
+
+    Map<String, Class> expectedCaptures = new HashMap<>();
+    expectedCaptures.put("id", Long.class);
+    expectedCaptures.put("name", String.class);
+    expectedCaptures.put("setting_type", String.class);
+    expectedCaptures.put("content", String.class);
+    expectedCaptures.put("updated_by", String.class);
+    expectedCaptures.put("update_timestamp", Long.class);
+
+    Map<String, Class> actualCaptures = new HashMap<>();
+    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedSettingColumns.getValue()) {
+      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
+    }
+    assertEquals(expectedCaptures, actualCaptures);
+
+    verify(dbAccessor);
   }
 
   @Test
   public void testExecuteDMLUpdates() throws Exception {
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateAlerts = UpgradeCatalog240.class.getDeclaredMethod("updateAlerts");
+    Method addSettingPermission = UpgradeCatalog240.class.getDeclaredMethod("addSettingPermission");
 
     Capture<String> capturedStatements = newCapture(CaptureType.ALL);
 
@@ -126,15 +167,15 @@ public class UpgradeCatalog240Test {
     UpgradeCatalog240 upgradeCatalog240 = createMockBuilder(UpgradeCatalog240.class)
             .addMockedMethod(addNewConfigurationsFromXml)
             .addMockedMethod(updateAlerts)
+            .addMockedMethod(addSettingPermission)
             .createMock();
 
     Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
     field.set(upgradeCatalog240, dbAccessor);
 
     upgradeCatalog240.addNewConfigurationsFromXml();
-    expectLastCall().once();
     upgradeCatalog240.updateAlerts();
-    expectLastCall().once();
+    upgradeCatalog240.addSettingPermission();
 
     replay(upgradeCatalog240, dbAccessor);
 
@@ -171,11 +212,10 @@ public class UpgradeCatalog240Test {
 
     UpgradeCatalog240 upgradeCatalog240 = new UpgradeCatalog240(injector);
     String inputSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"connection.timeout\",\"display_name\":\"Connection Timeout\",\"value\":5.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before this alert is considered to be CRITICAL\",\"units\":\"seconds\",\"threshold\":\"CRITICAL\"}]}";
-    List<String> params = new ArrayList<String>(Arrays.asList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold"));
+    List<String> params = new ArrayList<>(Arrays.asList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold"));
     String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"connection.timeout\",\"display_name\":\"Connection Timeout\",\"value\":5.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before this alert is considered to be CRITICAL\",\"units\":\"seconds\",\"threshold\":\"CRITICAL\"},{\"name\":\"checkpoint.time.warning.threshold\",\"display_name\":\"Checkpoint Warning\",\"value\":2.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.\",\"units\":\"%\",\"threshold\":\"WARNING\"},{\"name\":\"checkpoint.time.critical.threshold\",\"display_name\":\"Checkpoint Critical\",\"value\":2.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.\",\"units\":\"%\",\"threshold\":\"CRITICAL\"}]}";
 
     String result = upgradeCatalog240.addParam(inputSource, params);
     Assert.assertEquals(result, expectedSource);
   }
-
 }
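
The reworked testExecuteDDLUpdates above swaps the real DBAccessor for a strict mock by binding it in an anonymous Guice Module and building a fresh Injector, instead of setting the field reflectively. A minimal sketch of that binding pattern, assuming Guice and EasyMock on the classpath; Database and SchemaUpgrader are hypothetical stand-ins for DBAccessor and UpgradeCatalog240, used only for illustration:

    // Bind a mock in a Module so the injector supplies it to the class under test.
    import com.google.inject.Binder;
    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import com.google.inject.Injector;
    import com.google.inject.Module;

    import static org.easymock.EasyMock.createStrictMock;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class GuiceMockBindingSketch {

      // Hypothetical stand-in for DBAccessor.
      interface Database {
        void addColumn(String table, String column);
      }

      // Hypothetical stand-in for UpgradeCatalog240; receives its dependency by injection.
      static class SchemaUpgrader {
        private final Database db;

        @Inject
        SchemaUpgrader(Database db) {
          this.db = db;
        }

        void upgrade() {
          db.addColumn("adminpermission", "sort_order");
        }
      }

      public static void main(String[] args) {
        final Database db = createStrictMock(Database.class);
        db.addColumn("adminpermission", "sort_order");   // record the expected void call
        replay(db);

        // Bind the mock instance; any constructor asking for Database gets it.
        Module module = new Module() {
          @Override
          public void configure(Binder binder) {
            binder.bind(Database.class).toInstance(db);
          }
        };

        Injector injector = Guice.createInjector(module);
        injector.getInstance(SchemaUpgrader.class).upgrade();

        verify(db);
      }
    }

The same test also leans on EasyMock Capture objects (newCapture, capture, getValue) to inspect the column definitions handed to addColumn and createTable after executeDDLUpdates runs, which is how the new "setting" table columns are checked against the expected name/type map.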

+ 1 - 2
ambari-server/src/test/python/TestAmbariServer.py

@@ -6901,8 +6901,7 @@ class TestAmbariServer(TestCase):
     self.assertTrue(ensureCanStartUnderCurrentUserMock.called)
     self.assertTrue(generateEnvMock.called)
 
-    self.assertEquals(runOSCommandMock.call_args[0][0], '/path/to/java -cp test:path12 org.apache.ambari.server.checks.CheckDatabaseHelper'
-                                                        ' > /var/log/ambari-server/ambari-server.log 2>&1')
+    self.assertEquals(runOSCommandMock.call_args[0][0], '/path/to/java -cp test:path12 org.apache.ambari.server.checks.CheckDatabaseHelper')
 
     pass
 

+ 1 - 1
ambari-server/src/test/python/TestSetupAgent.py

@@ -307,7 +307,7 @@ class TestSetupAgent(TestCase):
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(subprocess, 'Popen')
   def test_execOsCommand(self, Popen_mock):
-    self.assertFalse(setup_agent.execOsCommand("hostname -f") == None)
+    self.assertIsNone(setup_agent.execOsCommand("hostname -f"))
 
   @only_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))

+ 2 - 2
ambari-server/src/test/python/custom_actions/test_ru_execute_tasks.py

@@ -129,7 +129,7 @@ class TestRUExecuteTasks(RMFTestCase):
     ru_execute.actionexecute(None)
 
     call_mock.assert_called_with(
-        "/usr/bin/ambari-python-wrap /var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package" + os.sep +
+        "source /var/lib/ambari-agent/ambari-env.sh ; /usr/bin/ambari-python-wrap /var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package" + os.sep +
         "scripts/namenode.py prepare_rolling_upgrade /tmp", logoutput=True, quiet=True)
     pass
 
@@ -176,6 +176,6 @@ class TestRUExecuteTasks(RMFTestCase):
     ru_execute = ExecuteUpgradeTasks()
     ru_execute.actionexecute(None)
 
-    call_mock.assert_called_with("/usr/bin/ambari-python-wrap /var/lib/ambari-agent/cache/custom_actions" + os.sep +
+    call_mock.assert_called_with("source /var/lib/ambari-agent/ambari-env.sh ; /usr/bin/ambari-python-wrap /var/lib/ambari-agent/cache/custom_actions" + os.sep +
                                  "scripts/namenode.py prepare_rolling_upgrade /tmp", logoutput=True, quiet=True)
     pass

+ 3 - 3
ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py

@@ -311,7 +311,7 @@ class TestMetricsCollector(RMFTestCase):
                                   mode = 0775,
                                   hadoop_conf_dir = '/etc/hadoop/conf',
                                   type = 'directory',
-                                  action = ['create_on_execute'],
+                                  action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                                   hdfs_site=self.getConfig()['configurations']['hdfs-site'],
                                   principal_name=UnknownConfigurationMock(),
                                   default_fs='hdfs://c6401.ambari.apache.org:8020',
@@ -327,7 +327,7 @@ class TestMetricsCollector(RMFTestCase):
                                   mode = 0711,
                                   hadoop_conf_dir = '/etc/hadoop/conf',
                                   type = 'directory',
-                                  action = ['create_on_execute'],
+                                  action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                                   hdfs_site=self.getConfig()['configurations']['hdfs-site'],
                                   principal_name=UnknownConfigurationMock(),
                                   default_fs='hdfs://c6401.ambari.apache.org:8020',
@@ -339,7 +339,7 @@ class TestMetricsCollector(RMFTestCase):
                                   kinit_path_local = '/usr/bin/kinit',
                                   user = 'hdfs',
                                   hadoop_conf_dir = '/etc/hadoop/conf',
-                                  action = ['execute'],
+                                  action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                                   hdfs_site=self.getConfig()['configurations']['hdfs-site'],
                                   principal_name=UnknownConfigurationMock(),
                                   default_fs='hdfs://c6401.ambari.apache.org:8020',

+ 9 - 9
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -340,7 +340,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = False,
@@ -353,7 +353,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -364,7 +364,7 @@ class TestHBaseMaster(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -478,7 +478,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = True,
@@ -491,7 +491,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -502,7 +502,7 @@ class TestHBaseMaster(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -627,7 +627,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = False,
@@ -642,7 +642,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -655,7 +655,7 @@ class TestHBaseMaster(RMFTestCase):
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
 

+ 27 - 27
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -108,7 +108,7 @@ class TestNamenode(RMFTestCase):
         dfs_type = '',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -125,7 +125,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -139,7 +139,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -221,7 +221,7 @@ class TestNamenode(RMFTestCase):
         dfs_type = '',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -238,7 +238,7 @@ class TestNamenode(RMFTestCase):
         dfs_type = '',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -252,7 +252,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -346,7 +346,7 @@ class TestNamenode(RMFTestCase):
         dfs_type = '',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
         only_if = True
     )
@@ -360,7 +360,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0770,
         only_if = True
     )
@@ -372,7 +372,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -446,7 +446,7 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -463,7 +463,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -477,7 +477,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -543,7 +543,7 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -560,7 +560,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -574,7 +574,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -646,7 +646,7 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -663,7 +663,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -677,7 +677,7 @@ class TestNamenode(RMFTestCase):
         principal_name = 'hdfs',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -749,7 +749,7 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -766,7 +766,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -780,7 +780,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -851,7 +851,7 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -868,7 +868,7 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0770,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -882,7 +882,7 @@ class TestNamenode(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -961,7 +961,7 @@ class TestNamenode(RMFTestCase):
                               owner = 'hdfs',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
-                              action = ['create_on_execute'],
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 0777,
                               )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
@@ -978,7 +978,7 @@ class TestNamenode(RMFTestCase):
                               owner = 'ambari-qa',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
-                              action = ['create_on_execute'],
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 0770,
                               )
     self.assertResourceCalled('HdfsResource', None,
@@ -992,7 +992,7 @@ class TestNamenode(RMFTestCase):
                               principal_name = None,
                               user = 'hdfs',
                               dfs_type = '',
-                              action = ['execute'],
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )
     self.assertNoMoreResources()

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py

@@ -69,7 +69,7 @@ class TestServiceCheck(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         mode = 0777,
@@ -84,7 +84,7 @@ class TestServiceCheck(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
     )
@@ -99,7 +99,7 @@ class TestServiceCheck(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
     )
@@ -113,7 +113,7 @@ class TestServiceCheck(RMFTestCase):
         principal_name = None,
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'],
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()

+ 14 - 14
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -340,7 +340,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
@@ -353,7 +353,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         mode = 0755,
     )
 
@@ -371,7 +371,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -384,7 +384,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         mode = 0755,
     )
     if not no_tmp:
@@ -399,7 +399,7 @@ class TestHiveServer(RMFTestCase):
           group = 'hdfs',
           hadoop_bin_dir = '/usr/bin',
           type = 'directory',
-          action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+          action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
           mode = 0777,
       )
     self.assertResourceCalled('HdfsResource', None,
@@ -409,7 +409,7 @@ class TestHiveServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -528,7 +528,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
@@ -541,7 +541,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
 
@@ -555,7 +555,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -568,7 +568,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/custompath/tmp/hive',
@@ -582,7 +582,7 @@ class TestHiveServer(RMFTestCase):
         group = 'hdfs',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -592,7 +592,7 @@ class TestHiveServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -926,7 +926,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertNoMoreResources()
@@ -968,7 +968,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
         dfs_type = '',
-        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
+        action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertNoMoreResources()

Some files were not shown because too many files changed in this diff