Преглед изворни кода

Merge branch 'trunk' into branch-alerts-dev

Jonathan Hurley пре 10 година
родитељ
комит
445a53cf5b
100 измењених фајлова са 4448 додато и 1306 уклоњено
  1. 18 1
      ambari-admin/pom.xml
  2. 2 2
      ambari-admin/src/main/resources/ui/admin-web/app/index.html
  3. 0 1
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/app.js
  4. 4 4
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/NavbarCtrl.js
  5. 4 4
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/CreateViewInstanceCtrl.js
  6. 11 11
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsEditCtrl.js
  7. 2 3
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
  8. 3 3
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js
  9. 3 4
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsCreateCtrl.js
  10. 17 17
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
  11. 10 7
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js
  12. 3 4
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersCreateCtrl.js
  13. 23 26
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersShowCtrl.js
  14. 106 0
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Alert.js
  15. 8 1
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js
  16. 1 1
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Group.js
  17. 4 4
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/PermissionLoader.js
  18. 1 1
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/User.js
  19. 0 115
      ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/uiAlert.js
  20. 96 0
      ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
  21. 1 1
      ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/edit.html
  22. 11 4
      ambari-admin/src/main/resources/ui/admin-web/bower.json
  23. 19 0
      ambari-admin/src/main/resources/ui/admin-web/package.json
  24. 43 0
      ambari-admin/src/main/resources/ui/admin-web/test/e2e/signout.js
  25. 58 0
      ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js
  26. 51 0
      ambari-admin/src/main/resources/ui/admin-web/test/protractor-conf.js
  27. 54 0
      ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js
  28. 117 0
      ambari-agent/src/test/python/resource_management/TestFileSystem.py
  29. 17 1
      ambari-common/src/main/python/resource_management/core/logger.py
  30. 53 33
      ambari-common/src/main/python/resource_management/core/providers/mount.py
  31. 149 0
      ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py
  32. 72 0
      ambari-common/src/main/python/resource_management/libraries/functions/file_system.py
  33. 9 0
      ambari-server/pom.xml
  34. 15 9
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
  35. 4 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
  36. 9 10
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java
  37. 163 64
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
  38. 0 13
      ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
  39. 14 260
      ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
  40. 38 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsHostProvider.java
  41. 302 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsProvider.java
  42. 448 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProvider.java
  43. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
  44. 8 0
      ambari-server/src/main/package/deb/control/preinst
  45. 8 0
      ambari-server/src/main/package/rpm/preinstall.sh
  46. 3 3
      ambari-server/src/main/python/ambari-server.py
  47. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml
  48. 8 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/yarn.py
  49. 6 0
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
  50. 24 14
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py
  51. 2 0
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
  52. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml
  53. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
  54. 11 8
      ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py
  55. 31 31
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
  56. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
  57. 6 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
  58. 18 9
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
  59. 11 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
  60. 87 4
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
  61. 15 15
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
  62. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml
  63. 8 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
  64. 0 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
  65. 24 3
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
  66. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
  67. 11 8
      ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/yarn.py
  68. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/falcon-env.xml
  69. 4 2
      ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/falcon.py
  70. 0 89
      ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/templates/startup.properties.j2
  71. 34 21
      ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/metrics.json
  72. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/capacity-scheduler.xml
  73. 2 2
      ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
  74. 84 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/falcon-startup.properties.xml
  75. 192 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
  76. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
  77. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.2/services/SLIDER/metainfo.xml
  78. 1079 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metrics.json
  79. 151 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
  80. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
  81. 6 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
  82. 190 167
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java
  83. 45 36
      ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
  84. 114 9
      ambari-server/src/test/python/TestAmbariServer.py
  85. 2 1
      ambari-server/src/test/python/stacks/1.3.2/configs/default.json
  86. 2 1
      ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
  87. 1 5
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  88. 136 0
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
  89. 8 8
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
  90. 26 26
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
  91. 26 26
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  92. 0 30
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
  93. 0 30
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
  94. 30 30
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
  95. 0 30
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
  96. 0 50
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
  97. 2 1
      ambari-server/src/test/python/stacks/2.0.6/configs/default.json
  98. 2 1
      ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
  99. 2 0
      ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
  100. 26 26
      ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py

+ 18 - 1
ambari-admin/pom.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari-project</artifactId>
@@ -122,6 +122,23 @@
               </arguments>
             </configuration>
           </execution>
+          <!--  @TODO: uncomment below execution for triggering ambari-admin UI unit tests from maven test phase.(AMBARI-7600)
+          <execution>
+            <id>unit test</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <workingDirectory>${basedir}/src/main/resources/ui/admin-web</workingDirectory>
+              <executable>npm</executable>
+              <arguments>
+                <argument>run</argument>
+                <argument>test-single-run</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          -->
         </executions>
       </plugin>
       <plugin>

+ 2 - 2
ambari-admin/src/main/resources/ui/admin-web/app/index.html

@@ -52,7 +52,7 @@
             <ul class="nav navbar-nav navbar-right">
               <li>
                 <div class="btn-group" dropdown is-open="status.isopen">
-                <button type="button" class="btn btn-default dropdown-toggle navbar-btn" ng-disabled="disabled">
+                  <button type="button" class="btn btn-default dropdown-toggle navbar-btn" ng-disabled="disabled">
                     <i class="fa fa-user"></i> {{currentUser}} <span class="caret"></span>
                   </button>
                   <ul class="dropdown-menu" role="menu">
@@ -135,7 +135,7 @@
     <script src="scripts/services/Group.js"></script>
     <script src="scripts/services/View.js"></script>
     <script src="scripts/services/Cluster.js"></script>
-    <script src="scripts/services/uiAlert.js"></script>
+    <script src="scripts/services/Alert.js"></script>
     <script src="scripts/services/PermissionLoader.js"></script>
     <script src="scripts/services/PermissionsSaver.js"></script>
     <script src="scripts/services/ConfirmationModal.js"></script>

+ 0 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/app.js

@@ -22,7 +22,6 @@ angular.module('ambariAdminConsole', [
   'ngAnimate',
   'ui.bootstrap',
   'restangular',
-  'angularAlert',
   'toggle-switch',
   'pascalprecht.translate'
 ])

+ 4 - 4
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/NavbarCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('NavbarCtrl',['$scope', 'Cluster', '$location', 'uiAlert', 'ROUTES', 'ConfirmationModal', '$rootScope', function($scope, Cluster, $location, uiAlert, ROUTES, ConfirmationModal, $rootScope) {
+.controller('NavbarCtrl',['$scope', 'Cluster', '$location', 'Alert', 'ROUTES', 'ConfirmationModal', '$rootScope', function($scope, Cluster, $location, Alert, ROUTES, ConfirmationModal, $rootScope) {
   $scope.cluster = null;
   $scope.editCluster = {
     name        : '',
@@ -28,7 +28,7 @@ angular.module('ambariAdminConsole')
   Cluster.getStatus().then(function(cluster) {
     $scope.cluster = cluster;
   }).catch(function(data) {
-  	uiAlert.danger(data.status, data.message);
+    Alert.error('Cannot load cluster status', data.data.message);
   });
 
   $scope.toggleEditName = function($event) {
@@ -57,9 +57,9 @@ angular.module('ambariAdminConsole')
 
     Cluster.editName(oldClusterName, newClusterName).then(function(data) {
       $scope.cluster.Clusters.cluster_name = newClusterName;
-      uiAlert.success('Success', 'The cluster has been renamed to ' + newClusterName + '.');
+      Alert.success('The cluster has been renamed to ' + newClusterName + '.');
     }).catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot rename cluster to ' + newClusterName, data.data.message);
     });
 
     $scope.toggleEditName();

+ 4 - 4
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/CreateViewInstanceCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('CreateViewInstanceCtrl',['$scope', 'View', 'uiAlert', '$routeParams', '$location', function($scope, View, uiAlert, $routeParams, $location) {
+.controller('CreateViewInstanceCtrl',['$scope', 'View', 'Alert', '$routeParams', '$location', function($scope, View, Alert, $routeParams, $location) {
   $scope.form = {};
 
   function loadMeta(){
@@ -74,16 +74,16 @@ angular.module('ambariAdminConsole')
         } else {
           View.createInstance($scope.instance)
           .then(function(data) {
-            uiAlert.success('Created View Instance ' + $scope.instance.instance_name);
+            Alert.success('Created View Instance ' + $scope.instance.instance_name);
             $location.path('/views/' + $scope.instance.view_name + '/versions/' + $scope.instance.version + '/instances/' + $scope.instance.instance_name + '/edit');
           })
           .catch(function(data) {
-            uiAlert.danger(data.data.status, data.data.message);
+            Alert.error('Cannot create instance', data.data.message);
           });
         }
       })
       .catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot create instance', data.data.message);
       });
     }
   };

+ 11 - 11
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsEditCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('ViewsEditCtrl', ['$scope', '$routeParams' , 'View', 'uiAlert', 'PermissionLoader', 'PermissionSaver', 'ConfirmationModal', '$location', function($scope, $routeParams, View, uiAlert, PermissionLoader, PermissionSaver, ConfirmationModal, $location) {
+.controller('ViewsEditCtrl', ['$scope', '$routeParams' , 'View', 'Alert', 'PermissionLoader', 'PermissionSaver', 'ConfirmationModal', '$location', function($scope, $routeParams, View, Alert, PermissionLoader, PermissionSaver, ConfirmationModal, $location) {
   $scope.identity = angular.identity;
   $scope.isConfigurationEmpty = true;
   function reloadViewInfo(){
@@ -42,7 +42,7 @@ angular.module('ambariAdminConsole')
       $scope.isConfigurationEmpty = angular.equals({}, $scope.configuration);
     })
     .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot load instance info', data.data.message);
     });
   }
 
@@ -53,7 +53,7 @@ angular.module('ambariAdminConsole')
     reloadViewInfo();
   });
 
-  function reloadViewPrivilegies(){
+  function reloadViewPrivileges(){
     PermissionLoader.getViewPermissions({
       viewName: $routeParams.viewId,
       version: $routeParams.version,
@@ -66,13 +66,13 @@ angular.module('ambariAdminConsole')
       $scope.isPermissionsEmpty = angular.equals({}, $scope.permissions);
     })
     .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot load permissions', data.data.message);
     });
   }
 
   $scope.permissions = [];
   
-  reloadViewPrivilegies();
+  reloadViewPrivileges();
 
   $scope.editSettingsDisabled = true;
   $scope.toggleSettingsEdit = function() {
@@ -93,7 +93,7 @@ angular.module('ambariAdminConsole')
         $scope.editSettingsDisabled = true;
       })
       .catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot save settings', data.data.message);
       });
     }
   };
@@ -122,7 +122,7 @@ angular.module('ambariAdminConsole')
         $scope.editConfigurationDisabled = true;
       })
       .catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot save properties', data.data.message);
       });
     }
   };
@@ -147,10 +147,10 @@ angular.module('ambariAdminConsole')
         instance_name: $routeParams.instanceId,
       }
     )
-    .then(reloadViewPrivilegies)
+    .then(reloadViewPrivileges)
     .catch(function(data) {
-      reloadViewPrivilegies();
-      uiAlert.danger(data.data.status, data.data.message);
+      reloadViewPrivileges();
+      Alert.error('Cannot save permissions', data.data.message);
     });
     $scope.editPermissionDisabled = true;
   };
@@ -170,7 +170,7 @@ angular.module('ambariAdminConsole')
         $location.path('/views');
       })
       .catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot delete instance', data.data.message);
       });
     });
   };

+ 2 - 3
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js

@@ -18,8 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('ViewsListCtrl',['$scope', 'View', '$modal', 'uiAlert', 'ConfirmationModal', function($scope, View, $modal, uiAlert, ConfirmationModal) {
-
+.controller('ViewsListCtrl',['$scope', 'View', '$modal', 'Alert', 'ConfirmationModal', function($scope, View, $modal, Alert, ConfirmationModal) {
   var deferredList = [];
   $scope.$on('$locationChangeStart', function() {
     deferredList.forEach(function(def) {
@@ -60,7 +59,7 @@ angular.module('ambariAdminConsole')
         });
       })
     }).catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot load views', data.data.message);
     });
   }
 

+ 3 - 3
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('ClustersManageAccessCtrl', ['$scope', 'Cluster', '$routeParams', 'uiAlert', 'PermissionLoader', 'PermissionSaver', function($scope, Cluster, $routeParams, uiAlert, PermissionLoader, PermissionSaver) {
+.controller('ClustersManageAccessCtrl', ['$scope', 'Cluster', '$routeParams', 'Alert', 'PermissionLoader', 'PermissionSaver', function($scope, Cluster, $routeParams, Alert, PermissionLoader, PermissionSaver) {
   $scope.identity = angular.identity;
   function reloadClusterData(){
     PermissionLoader.getClusterPermissions({
@@ -29,7 +29,7 @@ angular.module('ambariAdminConsole')
       $scope.permissions = angular.copy(permissions);
     })
     .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot load cluster data', data.data.message);
     });;
   }
  
@@ -56,7 +56,7 @@ angular.module('ambariAdminConsole')
       }
     ).then(reloadClusterData)
     .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot save permissions', data.data.message);
       reloadClusterData();
     });
     $scope.isEditMode = false;

+ 3 - 4
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsCreateCtrl.js

@@ -18,19 +18,18 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('GroupsCreateCtrl',['$scope', 'Group', '$location', 'uiAlert', function($scope, Group, $location, uiAlert) {
+.controller('GroupsCreateCtrl',['$scope', 'Group', '$location', 'Alert', function($scope, Group, $location, Alert) {
   $scope.group = new Group();
 
   $scope.createGroup = function() {
     $scope.form.submitted = true;
     if ($scope.form.$valid){
       $scope.group.save().then(function() {
-        uiAlert.success('Created group ' + $scope.group.group_name);
+        Alert.success('Created group <a href="#/groups/' + $scope.group.group_name + '/edit">' + $scope.group.group_name + '</a>');
         $location.path('/groups');
       })
       .catch(function(data) {
-      	data = data.data;
-        uiAlert.danger(data.status, data.message);
+        Alert.error('Group creation error', data.data.message);
       });
     }
   };

+ 17 - 17
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('GroupsEditCtrl',['$scope', 'Group', '$routeParams', 'uiAlert', 'ConfirmationModal', '$location', function($scope, Group, $routeParams, uiAlert, ConfirmationModal, $location) {
+.controller('GroupsEditCtrl',['$scope', 'Group', '$routeParams', 'Alert', 'ConfirmationModal', '$location', function($scope, Group, $routeParams, Alert, ConfirmationModal, $location) {
   $scope.editMode = false;
   $scope.group = new Group($routeParams.id);
   $scope.group.editingUsers = [];
@@ -48,7 +48,7 @@ angular.module('ambariAdminConsole')
     $scope.group.members = newMembers;
     $scope.group.saveMembers().then(loadMembers)
     .catch(function(data) {
-      uiAlert.danger(data.status, data.message);
+      Alert.error('Cannot update group members', data.data.message);
     });
     $scope.isMembersEditing = false;
   };
@@ -76,30 +76,30 @@ angular.module('ambariAdminConsole')
     });
   };
 
-  // Load privilegies
-  Group.getPrivilegies($routeParams.id).then(function(data) {
-    var privilegies = {
+  // Load privileges
+  Group.getPrivileges($routeParams.id).then(function(data) {
+    var privileges = {
       clusters: {},
       views: {}
     };
-    angular.forEach(data.data.items, function(privilegie) {
-      privilegie = privilegie.PrivilegeInfo;
-      if(privilegie.type === 'CLUSTER'){
+    angular.forEach(data.data.items, function(privilege) {
+      privilege = privilege.PrivilegeInfo;
+      if(privilege.type === 'CLUSTER'){
         // This is cluster
-        privilegies.clusters[privilegie.cluster_name] = privilegies.clusters[privilegie.cluster_name] || [];
-        privilegies.clusters[privilegie.cluster_name].push(privilegie.permission_name);
-      } else if ( privilegie.type === 'VIEW'){
-        privilegies.views[privilegie.instance_name] = privilegies.views[privilegie.instance_name] || { privileges:[]};
-        privilegies.views[privilegie.instance_name].version = privilegie.version;
-        privilegies.views[privilegie.instance_name].view_name = privilegie.view_name;
-        privilegies.views[privilegie.instance_name].privileges.push(privilegie.permission_name);
+        privileges.clusters[privilege.cluster_name] = privileges.clusters[privilege.cluster_name] || [];
+        privileges.clusters[privilege.cluster_name].push(privilege.permission_name);
+      } else if ( privilege.type === 'VIEW'){
+        privileges.views[privilege.instance_name] = privileges.views[privilege.instance_name] || { privileges:[]};
+        privileges.views[privilege.instance_name].version = privilege.version;
+        privileges.views[privilege.instance_name].view_name = privilege.view_name;
+        privileges.views[privilege.instance_name].privileges.push(privilege.permission_name);
       }
     });
 
-    $scope.privileges = data.data.items.length ? privilegies : null;
+    $scope.privileges = data.data.items.length ? privileges : null;
     $scope.dataLoaded = true;
   }).catch(function(data) {
-    uiAlert.danger(data.data.status, data.data.message);
+    Alert.error('Cannot load privileges', data.data.message);
   });
 
 

+ 10 - 7
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/mainCtrl.js

@@ -18,13 +18,16 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('MainCtrl',['$scope', 'Auth', 'uiAlert', '$modal', 'Cluster', function($scope, Auth, uiAlert, $modal, Cluster) {
+.controller('MainCtrl',['$scope', '$window','Auth', 'Alert', '$modal', 'Cluster', function($scope, $window, Auth, Alert, $modal, Cluster) {
   $scope.signOut = function() {
-    Auth.signout().then(function() {
-     window.location.pathname = ''; // Change location hard, because Angular works only with relative urls
-    }).catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
+    var data = JSON.parse(localStorage.ambari);
+    delete data.app.authenticated;
+    delete data.app.loginName;
+    delete data.app.user;
+    localStorage.ambari = JSON.stringify(data);
+    $window.location.pathname = '';
+    $scope.hello = "hello";
+    Auth.signout();
   };
 
   $scope.about = function() {
@@ -47,7 +50,7 @@ angular.module('ambariAdminConsole')
     $scope.cluster = cluster;
     $scope.isLoaded = true;
   }).catch(function(data) {
-      uiAlert.danger(data.status, data.message);
+    Alert.error('Check cluster status error', data.data.message);
   });
 
 }]);

+ 3 - 4
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersCreateCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('UsersCreateCtrl',['$scope', '$routeParams', 'User', '$location', 'uiAlert', function($scope, $routeParams, User, $location, uiAlert) {
+.controller('UsersCreateCtrl',['$scope', '$routeParams', 'User', '$location', 'Alert', function($scope, $routeParams, User, $location, Alert) {
   $scope.user = {
     active: true
   };
@@ -32,11 +32,10 @@ angular.module('ambariAdminConsole')
         'Users/active': !!$scope.user.active,
         'Users/admin': !!$scope.user.admin
       }).then(function() {
-        uiAlert.success('Created user ' + $scope.user.user_name);
+        Alert.success('Created user <a href="#/users/' + $scope.user.user_name + '">' + $scope.user.user_name + "</a>");
         $location.path('/users');
       }).catch(function(data) {;
-        data = data.data;
-        uiAlert.danger(data.status, data.message);
+        Alert.error('User creation error', data.data.message);
       });
     }
   };

+ 23 - 26
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersShowCtrl.js

@@ -18,7 +18,7 @@
 'use strict';
 
 angular.module('ambariAdminConsole')
-.controller('UsersShowCtrl', ['$scope', '$routeParams', 'User', '$modal', '$location', 'ConfirmationModal', 'uiAlert', 'Auth', 'getDifference', 'Group', '$q', function($scope, $routeParams, User, $modal, $location, ConfirmationModal, uiAlert, Auth, getDifference, Group, $q) {
+.controller('UsersShowCtrl', ['$scope', '$routeParams', 'User', '$modal', '$location', 'ConfirmationModal', 'Alert', 'Auth', 'getDifference', 'Group', '$q', function($scope, $routeParams, User, $modal, $location, ConfirmationModal, Alert, Auth, getDifference, Group, $q) {
 
   function loadUserInfo(){
     User.get($routeParams.id).then(function(data) {
@@ -58,13 +58,13 @@ angular.module('ambariAdminConsole')
     // Remove user from groups
     angular.forEach(diff.del, function(groupName) {
       promises.push(Group.removeMemberFromGroup(groupName, $scope.user.user_name).catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Removing from group error', data.data.message);
       }));
     });
     // Add user to groups
     angular.forEach(diff.add, function(groupName) {
       promises.push(Group.addMemberToGroup(groupName, $scope.user.user_name).catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot add user to group', data.data.message);
       }));
     });
     $q.all(promises).then(function() {
@@ -113,9 +113,9 @@ angular.module('ambariAdminConsole')
 
     modalInstance.result.then(function(data) {
       User.setPassword($scope.user, data.password, data.currentUserPassword).then(function() {
-        uiAlert.success('Password changed.');
+        Alert.success('Password changed.');
       }).catch(function(data) {
-        uiAlert.danger(data.data.status, data.data.message);
+        Alert.error('Cannot change password', data.data.message);
       });
     }); 
   };
@@ -141,7 +141,7 @@ angular.module('ambariAdminConsole')
       ConfirmationModal.show('Change Admin Privilege', message + '"'+$scope.user.user_name+'"?').then(function() {
         User.setAdmin($scope.user.user_name, $scope.user.admin)
         .then(function() {
-          loadPrivilegies();
+          loadPrivileges();
         });
       })
       .catch(function() {
@@ -159,37 +159,34 @@ angular.module('ambariAdminConsole')
     });
   };
 
-  // Load privilegies
-  function loadPrivilegies(){
-    User.getPrivilegies($routeParams.id).then(function(data) {
-      var privilegies = {
+  // Load privileges
+  function loadPrivileges(){
+    User.getPrivileges($routeParams.id).then(function(data) {
+      var privileges = {
         clusters: {},
         views: {}
       };
-      angular.forEach(data.data.items, function(privilegie) {
-        privilegie = privilegie.PrivilegeInfo;
-        if(privilegie.type === 'CLUSTER'){
+      angular.forEach(data.data.items, function(privilege) {
+        privilege = privilege.PrivilegeInfo;
+        if(privilege.type === 'CLUSTER'){
           // This is cluster
-          privilegies.clusters[privilegie.cluster_name] = privilegies.clusters[privilegie.cluster_name] || [];
-          privilegies.clusters[privilegie.cluster_name].push(privilegie.permission_name);
-        } else if ( privilegie.type === 'VIEW'){
-          privilegies.views[privilegie.instance_name] = privilegies.views[privilegie.instance_name] || { privileges:[]};
-          privilegies.views[privilegie.instance_name].version = privilegie.version;
-          privilegies.views[privilegie.instance_name].view_name = privilegie.view_name;
-          privilegies.views[privilegie.instance_name].privileges.push(privilegie.permission_name);
+          privileges.clusters[privilege.cluster_name] = privileges.clusters[privilege.cluster_name] || [];
+          privileges.clusters[privilege.cluster_name].push(privilege.permission_name);
+        } else if ( privilege.type === 'VIEW'){
+          privileges.views[privilege.instance_name] = privileges.views[privilege.instance_name] || { privileges:[]};
+          privileges.views[privilege.instance_name].version = privilege.version;
+          privileges.views[privilege.instance_name].view_name = privilege.view_name;
+          privileges.views[privilege.instance_name].privileges.push(privilege.permission_name);
 
         }
       });
 
-      $scope.privileges = data.data.items.length ? privilegies : null;
+      $scope.privileges = data.data.items.length ? privileges : null;
       $scope.dataLoaded = true;
 
     }).catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
+      Alert.error('Cannot load privileges', data.data.message);
     });
   }
-
-  loadPrivilegies();
-  
-    
+  loadPrivileges();  
 }]);

+ 106 - 0
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Alert.js

@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+'use strict';
+
+angular.module('ambariAdminConsole')
+.factory('Alert', [function() {
+  
+  var hideTimeout = null;
+  var $boxContainer = null;
+  var removingTimeout = null;
+
+  function createAlertBox(innerHTML, moreInfo, type){
+    if (!$boxContainer) {
+      $boxContainer = angular.element('<div class="alert-container"/>').appendTo('body');
+      $boxContainer
+        .on('mouseenter', function() {
+          clearTimeout(removingTimeout);
+        })
+        .on('mouseleave', function() {
+          startRemovingTimeout();
+        });
+    }
+    var elem = angular.element('<div><div class="icon-box"></div></div>').addClass('ambariAlert').addClass(type).addClass('invisible');
+
+    elem.append('<div class="content">' + innerHTML + '</div>');
+    if (moreInfo) {
+      $(' <a href class="more-collapse"> more...</a>').appendTo(elem.find('.content'))
+      .on('click', function() {
+        elem.find('.more').show();
+        $(this).remove();
+        return false;
+      });
+      elem.append('<div class="more">'+moreInfo+'</div>');
+    }
+
+    $('<button type="button" class="close"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button>')
+      .appendTo(elem)
+      .on('click', function() {
+        var $box = $(this).closest('.ambariAlert');
+        $box.remove();
+      });
+
+    var $icon = $('<span class="glyphicon"></span>');
+    switch (type){
+      case 'error':
+        $icon.addClass('glyphicon-remove-sign');
+        break;
+      case 'success':
+        $icon.addClass('glyphicon-ok-sign');
+        break;
+      case 'info':
+        $icon.addClass('glyphicon-info-sign');
+        break;
+    }
+    elem.find('.icon-box').append($icon);
+
+    elem.appendTo($boxContainer);
+    setTimeout(function() {
+      elem.removeClass('invisible');
+    }, 0);
+
+    startRemovingTimeout();
+  };
+
+  function startRemovingTimeout(){
+    clearTimeout(removingTimeout);
+    removingTimeout = setTimeout(removeTopBox, 5000);
+  }
+
+  function removeTopBox(){
+    $boxContainer.children().first().remove();
+    if (!$boxContainer.children().length) {
+      $boxContainer.remove();
+      $boxContainer = null;
+    } else {
+      startRemovingTimeout();
+    }
+  }
+
+  return {
+    error: function(innerHTML, moreInfo) {
+      createAlertBox(innerHTML, moreInfo, 'error');
+    },
+    success: function(innerHTML, moreInfo) {
+      createAlertBox(innerHTML, moreInfo, 'success');
+    },
+    info: function(innerHTML, moreInfo) {
+      createAlertBox(innerHTML, moreInfo, 'info');
+    }
+  };
+}]);

+ 8 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Auth.js

@@ -19,7 +19,14 @@
 
 angular.module('ambariAdminConsole')
 .factory('Auth',['$http', 'Settings', function($http, Settings) {
-	var currentUserName = JSON.parse(localStorage.ambari).app.loginName;
+  var ambari;
+  var currentUserName;
+  if (localStorage.ambari) {
+    ambari = JSON.parse(localStorage.ambari);
+    if (ambari && ambari.app && ambari.app.loginName) {
+      currentUserName = ambari.app.loginName;
+    }
+  }
   return {
     signout: function() {
       return $http({

+ 1 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Group.js

@@ -180,7 +180,7 @@ angular.module('ambariAdminConsole')
     );
   };
 
-  Group.getPrivilegies = function(groupId) {
+  Group.getPrivileges = function(groupId) {
     return $http.get(Settings.baseUrl + '/privileges', {
         params:{
           'PrivilegeInfo/principal_type': 'GROUP',

+ 4 - 4
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/PermissionLoader.js

@@ -30,10 +30,10 @@ angular.module('ambariAdminConsole')
         permissionsInner[permission.PermissionInfo.permission_name] = permission;
       });
 
-      // Now we can get privilegies
-      resource.getPrivileges(params).then(function(privilegies) {
-        angular.forEach(privilegies, function(privilegie) {
-          permissionsInner[privilegie.PrivilegeInfo.permission_name][privilegie.PrivilegeInfo.principal_type].push(privilegie.PrivilegeInfo.principal_name);
+      // Now we can get privileges
+      resource.getPrivileges(params).then(function(privileges) {
+        angular.forEach(privileges, function(privilege) {
+          permissionsInner[privilege.PrivilegeInfo.permission_name][privilege.PrivilegeInfo.principal_type].push(privilege.PrivilegeInfo.principal_name);
         });
 
         // After all builded - return object

+ 1 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/User.js

@@ -78,7 +78,7 @@ angular.module('ambariAdminConsole')
     delete: function(userId) {
       return Restangular.one('users', userId).remove();
     },
-    getPrivilegies : function(userId) {
+    getPrivileges : function(userId) {
       return $http.get(Settings.baseUrl + '/privileges', {
         params:{
           'PrivilegeInfo/principal_type': 'USER',

+ 0 - 115
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/uiAlert.js

@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-'use strict';
-
-angular.module('angularAlert',[])
-.factory('uiAlert', [function() {
-  var alerts = [];
-  var isRunning = false;
-
-  var alertBoxGenerator = function(title, message, type) {
-    var elem = angular.element('<div/>').addClass('alert');
-    elem.css({
-      'position': 'fixed',
-      'left': '50%',
-      'z-index': '10000',
-      'opacity': '1',
-      'padding': '20px',
-      WebkitTransition : 'all .5s ease-in-out',
-      MozTransition    : 'all .5s ease-in-out',
-      MsTransition     : 'all .5s ease-in-out',
-      OTransition      : 'all .5s ease-in-out',
-      transition       : 'all .5s ease-in-out',
-      '-webkit-transform': 'translateX(-50%)',
-      '-ms-transform': 'translateX(-50%)',
-      '-o-transform': 'translateX(-50%)',
-      'transform': 'translateX(-50%)'
-    });
-    if(!message){
-      elem.html(title);
-    } else {
-      elem.html('<strong>' + title + '</strong> ' + message);
-    }
-
-    elem.addClass('alert-' + (type ? type : 'info') );
-    $('<button type="button" class="close"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button>')
-      .appendTo(elem)
-      .css({
-        'position': 'absolute',
-        'top': '0',
-        'right': '5px',
-        'outline': 'none'
-      }).on('click', function() {
-        var alert = $(this).parent()[0];
-        for(var i = 0; i < alerts.length; i++){
-          if(alert === alerts[i][0]){
-            alert.remove();
-            alerts.splice(i, 1);
-            resetAlertsPositions();
-            break;
-          }
-        }
-      });
-    elem.appendTo('body');
-    alerts.push(elem);
-    resetAlertsPositions();
-  };
-
-  var resetAlertsPositions = function() {
-    var top = 10, height=0;
-    for(var i = 0 ; i < alerts.length; i++){
-      alerts[i].css('top', top);
-      height = alerts[i].css('height').replace('px', '') * 1;
-      top += height + 10;
-    }
-
-    if(!isRunning && alerts.length){
-      isRunning = true;
-      setTimeout(function() {
-        alerts.shift().css('opacity', '0').one('transitionend webkitTransitionEnd oTransitionEnd otransitionend MSTransitionEnd', function() {
-          isRunning = false;
-          this.remove();
-          resetAlertsPositions();
-        });
-      }, 5000);
-    }
-  };
-
-  var Alert = function(title, message, type) {
-    alertBoxGenerator(title, message, type);
-  };
-
-  Alert.success = function(title, message) {
-    alertBoxGenerator(title, message, 'success');
-  };
-
-  Alert.info = function(title, message) {
-    alertBoxGenerator(title, message, 'info');
-  };
-
-  Alert.warning = function(title, message) {
-    alertBoxGenerator(title, message, 'warning');
-  };
-
-
-  Alert.danger = function(title, message) {
-    alertBoxGenerator(title, message, 'danger');
-  };
-
-  return Alert;
-}]);

+ 96 - 0
ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css

@@ -309,6 +309,10 @@
   transform: rotateZ(90deg);
 }
 
+a.gotoinstance{
+  font-size: 12px;
+}
+
 .ats-switch{
   border-color: #333;
 }
@@ -1018,6 +1022,98 @@ button.btn.btn-xs{
   color: #666;
 }
 
+.alert-container {
+  position: fixed;
+  top: 50px;
+  right: 20px;
+  z-index: 1000;
+  text-align: right;
+  max-width: 450px;
+}
+.ambariAlert {
+  position: relative;
+  border: 1px solid #c4c4c4;
+  border-radius: 4px 0 0 4px;
+  box-shadow: 0 0px 4px #ebebeb;
+  min-width: 200px;
+  max-width: 450px;
+  background: white;
+  margin-bottom: 20px;
+  z-index: 1000;
+  padding: 20px 20px 20px 60px;
+  max-height: 100%;
+  display: block;
+  float: right;
+  clear: both;
+  text-align: left;
+  -webkit-box-sizing: border-box;
+  -moz-box-sizing: border-box;
+  box-sizing: border-box;
+
+  -webkit-transition: all 0.3s linear;
+  -o-transition: all 0.3s linear;
+  transition: all 0.3s linear;
+
+  -webkit-transform: translateX(0px);
+  -ms-transform: translateX(0px);
+  -o-transform: translateX(0px);
+  transform: translateX(0px);
+}
+.ambariAlert .content {
+  display: inline-block;
+  padding-right: 10px;
+}
+.ambariAlert .icon-box {
+  display: inline-block;
+  font-size: 30px;
+  position: absolute;
+  left: 15px;
+  top: 10px;
+}
+.ambariAlert .more {
+  display: none;
+  margin-top: 10px;
+}
+.ambariAlert .more.visible {
+  display: block;
+}
+.ambariAlert.invisible {
+  -webkit-transform: translateX(1000px);
+  -ms-transform: translateX(1000px);
+  -o-transform: translateX(1000px);
+  transform: translateX(1000px);
+  
+  padding: 0;
+  margin: 0;
+  max-height: 0;
+}
+.ambariAlert .close {
+  position: absolute;
+  right: 10px;
+  top: 10px;
+  outline: none;
+}
+.ambariAlert.error {
+  border-left: 3px solid #ef2427;
+}
+.ambariAlert.error .icon-box {
+  color: #ef2427;
+}
+
+.ambariAlert.success {
+  border-left: 3px solid #82c534;
+}
+.ambariAlert.success .icon-box {
+  color: #82c534;
+}
+
+.ambariAlert.info {
+  border-left: 3px solid #ffbc5b;
+}
+.ambariAlert.info .icon-box {
+  color: #ffbc5b;
+}
+
 .edit-cluster-name {
   cursor: pointer;
 }

+ 1 - 1
ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/edit.html

@@ -18,7 +18,7 @@
 <div class="clearfix">
   <ol class="breadcrumb pull-left">
     <li><a href="#/views">Views</a></li>
-    <li class="active">{{instance.ViewInstanceInfo.label}}</li>
+    <li class="active">{{instance.ViewInstanceInfo.label}} <a class="gotoinstance" ng-show="instance.ViewInstanceInfo.visible" href="/#/main{{instance.ViewInstanceInfo.context_path}}">Go to instance</a></li>
   </ol>
   <div class="pull-right top-margin-4" ng-switch="instance.ViewInstanceInfo.static">
     <button ng-switch-when="true" class="btn disabled btn-default btn-delete-instance" tooltip="Cannot Delete Static Instances">Delete Instance</button>

+ 11 - 4
ambari-admin/src/main/resources/ui/admin-web/bower.json

@@ -3,14 +3,21 @@
   "private": true,
   "dependencies": {
     "bootstrap": "~3.1.1",
-    "angular": "~1.2.18",
-    "angular-route": "~1.2.18",
+    "angular": "~1.2.25",
+    "angular-route": "~1.2.25",
     "angular-bootstrap": "~0.11.0",
+    "underscore": "~1.7.0",
     "restangular": "~1.4.0",
     "angular-bootstrap-toggle-switch": "~0.5.1",
-    "angular-animate": "~1.2.23",
+    "angular-animate": "~1.2.25",
     "angular-translate": "~2.2.0",
     "font-awesome": "~4.2.0"
   },
-  "devDependencies": {}
+  "devDependencies": {
+    "angular-mocks": "~1.2.25",
+    "commonjs": "~0.2.0",
+    "chai": "~1.8.0",
+    "mocha": "~1.14.0",
+    "sinon": "~1.10.3"
+  }
 }

+ 19 - 0
ambari-admin/src/main/resources/ui/admin-web/package.json

@@ -4,6 +4,13 @@
   "dependencies": {
   },
   "devDependencies": {
+    "karma": "^0.12.16",
+    "karma-phantomjs-launcher": "~0.1",
+    "karma-chrome-launcher": "^0.1.4",
+    "karma-jasmine": "^0.1.5",
+    "phantomjs": "^1.9.2",
+    "protractor": "~1.0.0",
+    "http-server": "^0.6.1",
     "bower": "1.3.8",
     "gulp": "^3.6.0",
     "gulp-autoprefixer": "^0.0.7",
@@ -18,6 +25,18 @@
     "gulp-useref": "^0.4.2",
     "gulp-plumber": "*"
   },
+  "scripts": {
+    "prestart": "npm install",
+    "start": "http-server -a 0.0.0.0 -p 8000",
+    "pretest": "npm install",
+    "test": "node node_modules/karma/bin/karma start test/karma.conf.js",
+    "test-single-run": "node node_modules/karma/bin/karma start test/karma.conf.js  --single-run",
+    "preupdate-webdriver": "npm install",
+    "update-webdriver": "webdriver-manager update",
+    "preprotractor": "npm run update-webdriver",
+    "protractor": "protractor test/protractor-conf.js",
+    "update-index-async": "node -e \"require('shelljs/global'); sed('-i', /\\/\\/@@NG_LOADER_START@@[\\s\\S]*\\/\\/@@NG_LOADER_END@@/, '//@@NG_LOADER_START@@\\n' + cat('bower_components/angular-loader/angular-loader.min.js') + '\\n//@@NG_LOADER_END@@', 'app/index-async.html');\""
+  },
   "engines": {
     "node": ">=0.10.0"
   }

+ 43 - 0
ambari-admin/src/main/resources/ui/admin-web/test/e2e/signout.js

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+'use strict';
+describe('Ambari sign out from Admin view', function () {
+  describe('Admin view', function () {
+    var ptor = protractor.getInstance();
+    beforeEach(function () {
+      ptor.get('app/index.html');
+      ptor.waitForAngular();
+    });
+    it('should navigate to login page on clicking "Sign out" action', function () {
+      var userDropdownBtn = element(by.binding('currentUser'));
+      var signOutAction = element(by.css('[ng-click="signOut()"]'));
+      //Action-1: Click on user dropdown menu and
+      //Action-2: Click on SignOut action link
+      userDropdownBtn.click().then(function () {
+        signOutAction.click().then(function () {
+          //Validation
+          setTimeout(function () {
+            expect(ptor.getCurrentUrl()).toContain('#/login');
+          }, 3000);
+        });
+      });
+    });
+  });
+});
+
+

+ 58 - 0
ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module.exports = function(config){
+  config.set({
+
+    basePath : '../',
+
+    files : [
+      'app/bower_components/angular/angular.js',
+      'app/bower_components/angular-animate/angular-animate.js',
+      'app/bower_components/angular-bootstrap/ui-bootstrap.js',
+      'app/bower_components/angular-bootstrap-toggle-switch/angular-toggle-switch.js',
+      'app/bower_components/angular-route/angular-route.js',
+      'app/bower_components/angular-translate/angular-translate.js',
+      'app/bower_components/underscore/underscore.js',
+      'app/bower_components/restangular/dist/restangular.js',
+      'app/bower_components/mocha/mocha.js',
+      'app/bower_components/chai/chai.js',
+      'app/bower_components/sinon/lib/sinon.js',
+      'app/bower_components/angular-mocks/angular-mocks.js',
+      'app/scripts/**/*.js',
+      'test/unit/**/*.js'
+    ],
+
+    autoWatch : true,
+
+    frameworks: ['jasmine'],
+
+    browsers: ['PhantomJS'],
+
+    plugins : [
+            'karma-jasmine',
+            'karma-phantomjs-launcher'
+            ],
+
+    junitReporter : {
+      outputFile: 'test_out/unit.xml',
+      suite: 'unit'
+    }
+
+  });
+};

+ 51 - 0
ambari-admin/src/main/resources/ui/admin-web/test/protractor-conf.js

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+exports.config = {
+  allScriptsTimeout: 11000,
+
+  specs: [
+    'e2e/*.js'
+  ],
+
+  capabilities: {
+    'browserName': 'chrome'
+  },
+
+  chromeOnly: true,
+
+  baseUrl: 'http://localhost:8000',
+
+  rootElement: 'body',
+
+  onPrepare: function() {
+
+  },
+
+
+  framework: 'jasmine',
+
+  jasmineNodeOpts: {
+    onComplete: null,
+    isVerbose: true,
+    showColors: true,
+    includeStackTrace: true,
+    defaultTimeoutInterval: 30000
+  }
+};
+

+ 54 - 0
ambari-admin/src/main/resources/ui/admin-web/test/unit/controllers/mainCtrl_test.js

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+describe('#Auth', function () {
+
+  describe('signout', function () {
+    var scope, ctrl, $httpBackend, $window, clusterService,deferred;
+    beforeEach(module('ambariAdminConsole', function($provide){
+      $provide.value('$window', {location: {pathname: 'http://c6401.ambari.apache.org:8080/views/ADMIN_VIEW/1.0.0/INSTANCE/#/'}});
+      localStorage.ambari = JSON.stringify({app: {authenticated: true, loginName: 'admin', user: 'user'}});
+    }));
+    afterEach(function() {
+      $httpBackend.verifyNoOutstandingExpectation();
+      $httpBackend.verifyNoOutstandingRequest();
+    });
+    beforeEach(inject(function (_$httpBackend_, $rootScope, $controller, _$window_, _Cluster_,_$q_) {
+      clusterService =  _Cluster_;
+      deferred = _$q_.defer();
+      spyOn(clusterService, 'getStatus').andReturn(deferred.promise);
+      deferred.resolve('c1');
+      $window = _$window_;
+      $httpBackend = _$httpBackend_;
+      $httpBackend.whenGET('/api/v1/logout').respond(200,{message: "successfully logged out"});
+      scope = $rootScope.$new();
+      scope.$apply();
+      ctrl = $controller('MainCtrl', {$scope: scope});
+    }));
+
+    it('should reset window.location and ambari localstorage', function () {
+      scope.signOut();
+      $httpBackend.flush();
+      chai.expect($window.location.pathname).to.be.empty;
+      var data = JSON.parse(localStorage.ambari);
+      chai.expect(data.app.authenticated).to.equal(undefined);
+      chai.expect(data.app.loginName).to.equal(undefined);
+      chai.expect(data.app.user).to.equal(undefined);
+    });
+  });
+});

+ 117 - 0
ambari-agent/src/test/python/resource_management/TestFileSystem.py

@@ -0,0 +1,117 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+from unittest import TestCase
+from mock.mock import patch
+
+from resource_management.libraries.functions import file_system
+import resource_management.core.providers.mount
+
+
+class TestFileSystem(TestCase):
+  """
+  Test the functionality of the file_system.py file that checks for the mount point of a path.
+  """
+
+  class MOUNT_TYPE:
+    SINGLE_ROOT = 1
+    MULT_DRIVE_CONFLICT = 2
+    MULT_DRIVE_DISTINCT = 3
+
+  def _get_mount(self, type):
+    """
+    /hadoop/hdfs/data will always be on the root
+
+    If the type is MULT_DRIVE_CONFLICT:
+    /hadoop/hdfs/data/1 is on /dev/sda1
+    /hadoop/hdfs/data/2 is on /dev/sda1
+
+    If the type is MULT_DRIVE_DISTINCT:
+    /hadoop/hdfs/data/1 is on /dev/sda1
+    /hadoop/hdfs/data/2 is on /dev/sda2
+    """
+    out = "/dev/mapper/VolGroup-lv_root on / type ext4 (rw)" + os.linesep + \
+          "proc on /proc type proc (rw)" + os.linesep + \
+          "sysfs on /sys type sysfs (rw)" + os.linesep + \
+          "devpts on /dev/pts type devpts (rw,gid=5,mode=620)" + os.linesep + \
+          "tmpfs on /dev/shm type tmpfs (rw)" + os.linesep + \
+          "/dev/sda1 on /boot type ext4 (rw)" + os.linesep + \
+          "none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)" + os.linesep + \
+          "sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)" + os.linesep + \
+          "/vagrant on /vagrant type vboxsf (uid=501,gid=501,rw)"
+
+    if type == self.MOUNT_TYPE.MULT_DRIVE_CONFLICT:
+      out += os.linesep + \
+             "/dev/sda1 on /hadoop/hdfs type ext4 (rw)"
+    elif type == self.MOUNT_TYPE.MULT_DRIVE_DISTINCT:
+      out += os.linesep + \
+             "/dev/sda1 on /hadoop/hdfs/data/1 type ext4 (rw)" + os.linesep + \
+             "/dev/sda2 on /hadoop/hdfs/data/2 type ext4 (rw)"
+
+    out_array = [x.split(' ') for x in out.strip().split('\n')]
+    mount_val = []
+    for m in out_array:
+      if len(m) >= 6 and m[1] == "on" and m[3] == "type":
+        x = dict(
+          device=m[0],
+          mount_point=m[2],
+          fstype=m[4],
+          options=m[5][1:-1].split(',') if len(m[5]) >= 2 else []
+        )
+        mount_val.append(x)
+
+    return mount_val
+
+  def test_invalid(self):
+    """
+    Testing when parameters are invalid or missing.
+    """
+    mount_point = file_system.get_mount_point_for_dir(None)
+    self.assertEqual(mount_point, None)
+
+    mount_point = file_system.get_mount_point_for_dir("")
+    self.assertEqual(mount_point, None)
+
+    mount_point = file_system.get_mount_point_for_dir("  ")
+    self.assertEqual(mount_point, None)
+
+
+  @patch('resource_management.core.providers.mount.get_mounted')
+  def test_at_root(self, mounted_mock):
+    """
+    Testing when the path is mounted on the root.
+    """
+    mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.SINGLE_ROOT)
+
+    mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data")
+    self.assertEqual(mount_point, "/")
+
+
+  @patch('resource_management.core.providers.mount.get_mounted')
+  def test_at_drive(self, mounted_mock):
+    """
+    Testing when the path is mounted on a virtual file system not at the root.
+    """
+    mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.MULT_DRIVE_DISTINCT)
+
+    mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/1")
+    self.assertEqual(mount_point, "/hadoop/hdfs/data/1")
+
+    mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/2")
+    self.assertEqual(mount_point, "/hadoop/hdfs/data/2")

+ 17 - 1
ambari-common/src/main/python/resource_management/core/logger.py

@@ -29,7 +29,15 @@ class Logger:
   
   # unprotected_strings : protected_strings map
   sensitive_strings = {}
-  
+
+  @staticmethod
+  def error(text):
+    Logger.logger.error(Logger.get_protected_text(text))
+
+  @staticmethod
+  def warning(text):
+    Logger.logger.warning(Logger.get_protected_text(text))
+
   @staticmethod
   def info(text):
     Logger.logger.info(Logger.get_protected_text(text))
@@ -38,6 +46,14 @@ class Logger:
   def debug(text):
     Logger.logger.debug(Logger.get_protected_text(text))
 
+  @staticmethod
+  def error_resource(resource):
+    Logger.error(Logger.get_protected_text(Logger._get_resource_repr(resource)))
+
+  @staticmethod
+  def warning_resource(resource):
+    Logger.warning(Logger.get_protected_text(Logger._get_resource_repr(resource)))
+
   @staticmethod
   def info_resource(resource):
     Logger.info(Logger.get_protected_text(Logger._get_resource_repr(resource)))

+ 53 - 33
ambari-common/src/main/python/resource_management/core/providers/mount.py

@@ -24,11 +24,62 @@ from __future__ import with_statement
 
 import os
 import re
+from subprocess import Popen, PIPE, STDOUT
+
 from resource_management.core.base import Fail
 from resource_management.core.providers import Provider
 from resource_management.core.logger import Logger
 
 
+def get_mounted():
+  """
+  :return: Return a list of mount objects (dictionary type) that contain the device, mount point, and other options.
+  """
+  p = Popen("mount", stdout=PIPE, stderr=STDOUT, shell=True)
+  out = p.communicate()[0]
+  if p.wait() != 0:
+    raise Fail("Getting list of mounts (calling mount) failed")
+
+  mounts = [x.split(' ') for x in out.strip().split('\n')]
+
+  results = []
+  for m in mounts:
+    # Example of m:
+    # /dev/sda1 on / type ext4 (rw,barrier=0)
+    # /dev/sdb on /grid/0 type ext4 (rw,discard)
+    if len(m) >= 6 and m[1] == "on" and m[3] == "type":
+      x = dict(
+        device=m[0],
+        mount_point=m[2],
+        fstype=m[4],
+        options=m[5][1:-1].split(',') if len(m[5]) >= 2 else []
+      )
+      results.append(x)
+
+  return results
+
+
+def get_fstab(self):
+  """
+  :return: Return a list of objects (dictionary type) representing the file systems table.
+  """
+  mounts = []
+  with open("/etc/fstab", "r") as fp:
+    for line in fp:
+      line = line.split('#', 1)[0].strip()
+      mount = re.split('\s+', line)
+      if len(mount) == 6:
+        mounts.append(dict(
+          device=mount[0],
+          mount_point=mount[1],
+          fstype=mount[2],
+          options=mount[3].split(","),
+          dump=int(mount[4]),
+          passno=int(mount[5]),
+          ))
+  return mounts
+
+
 class MountProvider(Provider):
   def action_mount(self):
     if not os.path.exists(self.resource.mount_point):
@@ -89,7 +140,7 @@ class MountProvider(Provider):
     if self.resource.device and not os.path.exists(self.resource.device):
       raise Fail("%s Device %s does not exist" % (self, self.resource.device))
 
-    mounts = self.get_mounted()
+    mounts = get_mounted()
     for m in mounts:
       if m['mount_point'] == self.resource.mount_point:
         return True
@@ -97,41 +148,10 @@ class MountProvider(Provider):
     return False
 
   def is_enabled(self):
-    mounts = self.get_fstab()
+    mounts = get_fstab()
     for m in mounts:
       if m['mount_point'] == self.resource.mount_point:
         return True
 
     return False
 
-  def get_mounted(self):
-    p = Popen("mount", stdout=PIPE, stderr=STDOUT, shell=True)
-    out = p.communicate()[0]
-    if p.wait() != 0:
-      raise Fail("[%s] Getting list of mounts (calling mount) failed" % self)
-
-    mounts = [x.split(' ') for x in out.strip().split('\n')]
-
-    return [dict(
-      device=m[0],
-      mount_point=m[2],
-      fstype=m[4],
-      options=m[5][1:-1].split(','),
-    ) for m in mounts if m[1] == "on" and m[3] == "type"]
-
-  def get_fstab(self):
-    mounts = []
-    with open("/etc/fstab", "r") as fp:
-      for line in fp:
-        line = line.split('#', 1)[0].strip()
-        mount = re.split('\s+', line)
-        if len(mount) == 6:
-          mounts.append(dict(
-            device=mount[0],
-            mount_point=mount[1],
-            fstype=mount[2],
-            options=mount[3].split(","),
-            dump=int(mount[4]),
-            passno=int(mount[5]),
-          ))
-    return mounts

+ 149 - 0
ambari-common/src/main/python/resource_management/libraries/functions/dfs_datanode_helper.py

@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["handle_dfs_data_dir", ]
+import os
+
+from resource_management.libraries.functions.file_system import get_mount_point_for_dir, get_and_cache_mount_points
+from resource_management.core.logger import Logger
+
+
def _write_data_dir_to_mount_in_file(new_data_dir_to_mount_point):
  """
  Persist the last known mount point of every DFS data dir.

  Overwrites (or creates) the file named by params.data_dir_mount_file with a
  commented header followed by one "data_dir,mount_point" line per entry. The
  file is a cache only: it is regenerated on every DataNode start, so losing
  it is safe (see the header comments written below).

  :param new_data_dir_to_mount_point: Dictionary to write to the
    data_dir_mount_file file, where the key is each DFS data dir and the
    value is its current mount point.
  :return: True on success, False otherwise. Failure to write is logged but
    not raised, since the cache is best-effort.
  """
  # Deferred import: "params" is the per-command script configuration module
  # generated by Ambari and is only importable at execution time.
  import params

  # Overwrite the existing file, or create it if doesn't exist
  if params.data_dir_mount_file:
    try:
      with open(str(params.data_dir_mount_file), "w") as f:
        f.write("# This file keeps track of the last known mount-point for each DFS data dir.\n")
        f.write("# It is safe to delete, since it will get regenerated the next time that the DataNode starts.\n")
        f.write("# However, it is not advised to delete this file since Ambari may \n")
        f.write("# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.\n")
        f.write("# Comments begin with a hash (#) symbol\n")
        f.write("# data_dir,mount_point\n")
        for data_dir, mount_point in new_data_dir_to_mount_point.items():
          f.write(data_dir + "," + mount_point + "\n")
    except Exception as e:
      # Include the exception detail; the original message also duplicated
      # the word "mount".
      Logger.error("Encountered error while attempting to save DFS data dir mount point values to file %s: %s" %
                   (str(params.data_dir_mount_file), str(e)))
      return False
  return True
+
+
def _get_data_dir_to_mount_from_file():
  """
  Parse the data_dir_mount_file cache written by
  _write_data_dir_to_mount_in_file.

  :return: Dictionary where the key is each DFS data dir (lower-cased) and
    the value is its last known mount point. Returns an empty dict when the
    file is unset, missing, or unreadable; read errors are logged but not
    raised, since the cache is best-effort.
  """
  # Deferred import: "params" is the per-command script configuration module
  # generated by Ambari and is only importable at execution time.
  import params
  data_dir_to_mount = {}

  if params.data_dir_mount_file is not None and os.path.exists(str(params.data_dir_mount_file)):
    try:
      with open(str(params.data_dir_mount_file), "r") as f:
        for line in f:
          # Strip first so that comments indented with whitespace are also
          # recognized, then skip blanks and comment lines.
          line = line.strip().lower()
          if not line or line.startswith("#"):
            continue
          line_array = line.split(",")
          if len(line_array) == 2:
            data_dir_to_mount[line_array[0]] = line_array[1]
    except Exception as e:
      Logger.error("Encountered error while attempting to read DFS data dir mount point values from file %s: %s" %
                   (str(params.data_dir_mount_file), str(e)))
  return data_dir_to_mount
+
+
def handle_dfs_data_dir(func, params):
  """
  Determine which DFS data dir paths may be (re-)created, and create them.

  There are 2 use cases:
  1. Customers that have many DFS data dirs, each one on a separate mount point
     that corresponds to a different drive.
  2. Developers that are using a sandbox VM and all DFS data dirs are mounted
     on the root.

  The goal is to avoid forcefully creating a DFS data dir when a user's drive
  fails. In that scenario, the mount point for a DFS data dir changes from
  something like /hadoop/hdfs/data/data1 to /. If Ambari forcefully created the
  directory anyway, it would soon fill up the root drive. Instead, we skip
  creation and let HDFS handle the failure based on its tolerance of missing
  directories.

  Relies on params.data_dir_mount_file (dfs.datanode.data.dir.mount.file) for
  the last known mount point of each data dir. After deciding which dirs to
  create, it refreshes the cached mount points and rewrites the file.

  :param func: Function that will be called if a directory will be created.
               This function will be called as func(data_dir, params)
  :param params: parameters to pass to function pointer
  """
  prev_data_dir_to_mount_point = _get_data_dir_to_mount_from_file()

  # With no history file configured (or none written yet) there is nothing to
  # compare against, so it is safe to create every missing directory.
  allowed_to_create_any_dir = params.data_dir_mount_file is None or not os.path.exists(params.data_dir_mount_file)

  valid_data_dirs = []
  for data_dir in params.dfs_data_dir.split(","):
    if data_dir is None or data_dir.strip() == "":
      continue

    # Normalize exactly like the history file does, so lookups match.
    data_dir = data_dir.strip().lower()
    valid_data_dirs.append(data_dir)

    if not os.path.isdir(data_dir):
      create_this_dir = allowed_to_create_any_dir
      # Determine if should be allowed to create the data_dir directory
      if not create_this_dir:
        last_mount_point_for_dir = prev_data_dir_to_mount_point.get(data_dir)
        if last_mount_point_for_dir is None:
          # Couldn't retrieve any information about where this dir used to be
          # mounted, so allow creating the directory to be safe.
          create_this_dir = True
        else:
          curr_mount_point = get_mount_point_for_dir(data_dir)

          # Create only if the dir was always on the root, or is currently on
          # a real (non-root) mount. create_this_dir stays False exactly when
          # the directory's drive became unmounted.
          if last_mount_point_for_dir == "/" or (curr_mount_point is not None and curr_mount_point != "/"):
            create_this_dir = True

      if create_this_dir:
        Logger.info("Forcefully creating directory: %s" % str(data_dir))

        # Call the function
        func(data_dir, params)
      else:
        Logger.warning("Directory %s does not exist and became unmounted." % str(data_dir))

  # Refresh the known mount points
  get_and_cache_mount_points(refresh=True)

  # Record the current mount point of every data dir that now exists, so the
  # next run can detect a drive that has since become unmounted.
  new_data_dir_to_mount_point = {}
  for data_dir in valid_data_dirs:
    # At this point, the directory may or may not exist
    if os.path.isdir(data_dir):
      new_data_dir_to_mount_point[data_dir] = get_mount_point_for_dir(data_dir)

  # Save back to the file
  _write_data_dir_to_mount_in_file(new_data_dir_to_mount_point)

+ 72 - 0
ambari-common/src/main/python/resource_management/libraries/functions/file_system.py

@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["get_and_cache_mount_points", "get_mount_point_for_dir"]
+import os
+from resource_management.core.logger import Logger
+from resource_management.core.providers import mount
+
+# Global variable
+mounts = None
+
+
def get_and_cache_mount_points(refresh=False):
  """
  Return the module-level "mounts" cache, computing it on first use.

  :param refresh: Boolean flag indicating whether to recompute the cache even
    if it is already populated.
  :return: List of mount dicts (as produced by mount.get_mounted()), each with
    a whitespace-trimmed "mount_point" value.
  """
  # The global declaration must precede any use of the name: declaring it
  # after reading "mounts" is a SyntaxWarning on Python 2 and a SyntaxError
  # on Python 3.
  global mounts

  if mounts is None or refresh:
    mounts = mount.get_mounted()
    for m in mounts:
      # mount.get_mounted() may leave trailing whitespace on mount points.
      if m["mount_point"] is not None:
        m["mount_point"] = m["mount_point"].rstrip()
    Logger.info("Host contains mounts: %s." % str([m["mount_point"] for m in mounts]))
  return mounts
+
+
def get_mount_point_for_dir(dir):
  """
  Find the closest (deepest) mount point containing a directory path.

  :param dir: Directory to check, even if it doesn't exist.
  :return: Returns the closest mount point as a string for the directory. If
    the "dir" variable is None or empty, will return None. If the directory
    does not match any known mount, returns None (a "/" mount normally
    guarantees a match for any absolute path).
  """
  best_mount_found = None
  if dir:
    # Normalize the same way handle_dfs_data_dir does, so comparisons match.
    dir = dir.strip().lower()

    cached_mounts = get_and_cache_mount_points()

    # If the path is "/hadoop/hdfs/data", then possible matches for mounts
    # could be "/", "/hadoop/hdfs", and "/hadoop/hdfs/data".
    # Take the longest matching mount point, and match only on whole path
    # segments: a bare prefix test would wrongly let "/hadoop" claim
    # "/hadoop1/data".
    for m in cached_mounts:
      mount_point = m["mount_point"]
      if mount_point is None:
        # get_and_cache_mount_points() explicitly tolerates None here.
        continue
      if dir == mount_point or dir.startswith(mount_point.rstrip(os.path.sep) + os.path.sep):
        if best_mount_found is None or len(best_mount_found) < len(mount_point):
          best_mount_found = mount_point

  Logger.info("Mount point for directory %s is %s" % (str(dir), str(best_mount_found)))
  return best_mount_found

+ 9 - 0
ambari-server/pom.xml

@@ -25,6 +25,8 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
+    <!-- On centos the python xml's are inside python package -->
+    <python.xml.package>${python.ver}</python.xml.package>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
     <deb.architecture>amd64</deb.architecture>
     <deb.dependency.list>openssl, postgresql (&gt;= 8.1), ${deb.python.ver}, curl</deb.dependency.list>
@@ -220,6 +222,7 @@
             <require>postgresql-server &gt;= 8.1</require>
             <require>openssl</require>
             <require>${python.ver}</require>
+            <require>${python.xml.package}</require>
           </requires>
           <postinstallScriptlet>
             <scriptFile>src/main/package/rpm/postinstall.sh</scriptFile>
@@ -1150,6 +1153,12 @@
         </plugins>
       </build>
     </profile>
+    <profile>
+      <id>suse11</id>
+      <properties>
+        <python.xml.package>python-xml</python.xml.package>
+      </properties>
+    </profile>
   </profiles>
   <dependencies>
     <dependency>

+ 15 - 9
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java

@@ -45,6 +45,7 @@ import org.apache.ambari.server.controller.ganglia.GangliaHostProvider;
 import org.apache.ambari.server.controller.ganglia.GangliaReportPropertyProvider;
 import org.apache.ambari.server.controller.jmx.JMXHostProvider;
 import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
+import org.apache.ambari.server.controller.metrics.MetricsHostProvider;
 import org.apache.ambari.server.controller.nagios.NagiosPropertyProvider;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
@@ -72,7 +73,7 @@ import com.google.inject.Inject;
 /**
  * An abstract provider module implementation.
  */
-public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider {
+public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider, MetricsHostProvider {
 
   private static final int PROPERTY_REQUEST_CONNECT_TIMEOUT = 5000;
   private static final int PROPERTY_REQUEST_READ_TIMEOUT    = 10000;
@@ -219,7 +220,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
   }
 
 
-  // ----- JMXHostProvider ---------------------------------------------------
+  // ----- MetricsHostProvider ---------------------------------------------------
 
   @Override
   public String getHostName(String clusterName, String componentName) throws SystemException {
@@ -240,6 +241,8 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
     return hosts;
   }
 
+  // ----- JMXHostProvider ---------------------------------------------------
+
   @Override
   public String getPort(String clusterName, String componentName) throws SystemException {
     // Parent map need not be synchronized
@@ -474,11 +477,11 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               type,
               streamProvider,
               this,
+              this,
               PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
               null,
               PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"),
-              PropertyHelper.getPropertyId("ServiceComponentInfo", "state"),
-              Collections.singleton("STARTED"));
+              PropertyHelper.getPropertyId("ServiceComponentInfo", "state"));
 
           PropertyProvider gpp = createGangliaComponentPropertyProvider(
               type,
@@ -492,6 +495,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               type,
               this,
               this,
+              this,
               streamProvider,
               PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
               null,
@@ -507,11 +511,11 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               type,
               streamProvider,
               this,
+              this,
               PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
               PropertyHelper.getPropertyId("HostRoles", "host_name"),
               PropertyHelper.getPropertyId("HostRoles", "component_name"),
-              PropertyHelper.getPropertyId("HostRoles", "state"),
-              Collections.singleton("STARTED"));
+              PropertyHelper.getPropertyId("HostRoles", "state"));
 
           PropertyProvider gpp = createGangliaHostComponentPropertyProvider(
               type,
@@ -526,6 +530,7 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
               type,
               this,
               this,
+              this,
               streamProvider,
               PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
               PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -749,14 +754,15 @@ public abstract class AbstractProviderModule implements ProviderModule, Resource
    */
   private PropertyProvider createJMXPropertyProvider(Resource.Type type, StreamProvider streamProvider,
                                                      JMXHostProvider jmxHostProvider,
+                                                     MetricsHostProvider metricsHostProvider,
                                                      String clusterNamePropertyId,
                                                      String hostNamePropertyId,
                                                      String componentNamePropertyId,
-                                                     String statePropertyId,
-                                                     Set<String> healthyStates) {
+                                                     String statePropertyId) {
     
     return new JMXPropertyProvider(PropertyHelper.getJMXPropertyIds(type), streamProvider,
-          jmxHostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId, statePropertyId, healthyStates);
+        jmxHostProvider, metricsHostProvider, clusterNamePropertyId, hostNamePropertyId,
+                    componentNamePropertyId, statePropertyId);
   }
 
   /**

+ 4 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -853,6 +853,8 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<String, PropertyUpdater>();
+
 
     Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
 
@@ -881,6 +883,7 @@ public class BlueprintConfigurationProcessor {
     multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap);
     multiHostTopologyUpdaters.put("core-site", multiCoreSiteMap);
     multiHostTopologyUpdaters.put("hdfs-site", multiHdfsSiteMap);
+    multiHostTopologyUpdaters.put("hive-site", multiHiveSiteMap);
 
     dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap);
 
@@ -936,6 +939,7 @@ public class BlueprintConfigurationProcessor {
     multiWebhcatSiteMap.put("templeton.hive.properties", new MultipleHostTopologyUpdater("HIVE_SERVER"));
     multiWebhcatSiteMap.put("templeton.kerberos.principal", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
     hiveEnvMap.put("hive_hostname", new SingleHostTopologyUpdater("HIVE_SERVER"));
+    multiHiveSiteMap.put("hive.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
 
     // OOZIE_SERVER
     oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));

+ 9 - 10
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java

@@ -21,10 +21,8 @@ package org.apache.ambari.server.controller.internal;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * Simple resource implementation.
@@ -39,14 +37,15 @@ public class ResourceImpl implements Resource {
   /**
    * The map of property maps keyed by property category.
    */
-  private final Map<String, Map<String, Object>> propertiesMap = new TreeMap<String, Map<String, Object>>();
+  private final Map<String, Map<String, Object>> propertiesMap =
+      Collections.synchronizedMap(new TreeMap<String, Map<String, Object>>());
 
   // ----- Constructors ------------------------------------------------------
 
   /**
    * Create a resource of the given type.
    *
-   * @param type  the resource type
+   * @param type the resource type
    */
   public ResourceImpl(Type type) {
     this.type = type;
@@ -55,7 +54,7 @@ public class ResourceImpl implements Resource {
   /**
    * Copy constructor
    *
-   * @param resource  the resource to copy
+   * @param resource the resource to copy
    */
   public ResourceImpl(Resource resource) {
     this(resource, null);
@@ -65,8 +64,8 @@ public class ResourceImpl implements Resource {
    * Construct a resource from the given resource, setting only the properties
    * that are found in the given set of property and category ids.
    *
-   * @param resource     the resource to copy
-   * @param propertyIds  the set of requested property and category ids
+   * @param resource    the resource to copy
+   * @param propertyIds the set of requested property and category ids
    */
   public ResourceImpl(Resource resource, Set<String> propertyIds) {
     this.type = resource.getType();
@@ -106,7 +105,7 @@ public class ResourceImpl implements Resource {
 
     Map<String, Object> properties = propertiesMap.get(categoryKey);
     if (properties == null) {
-      properties = new TreeMap<String, Object>();
+      properties = Collections.synchronizedMap(new TreeMap<String, Object>());
       propertiesMap.put(categoryKey, properties);
     }
     properties.put(PropertyHelper.getPropertyName(id), value);

+ 163 - 64
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java

@@ -35,6 +35,7 @@ import org.apache.ambari.server.controller.ganglia.GangliaHostProvider;
 import org.apache.ambari.server.controller.ganglia.GangliaPropertyProvider;
 import org.apache.ambari.server.controller.jmx.JMXHostProvider;
 import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
+import org.apache.ambari.server.controller.metrics.MetricsHostProvider;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.PropertyProvider;
 import org.apache.ambari.server.controller.spi.Request;
@@ -56,57 +57,71 @@ import com.google.inject.Injector;
  * This class analyzes a service's metrics to determine if additional
  * metrics should be fetched.  It's okay to maintain state here since these
  * are done per-request.
- *
  */
 public class StackDefinedPropertyProvider implements PropertyProvider {
   private static final Logger LOG = LoggerFactory.getLogger(StackDefinedPropertyProvider.class);
-  
+
   @Inject
   private static Clusters clusters = null;
   @Inject
   private static AmbariMetaInfo metaInfo = null;
-  
+  @Inject
+  private static Injector injector = null;
+
+
   private Resource.Type type = null;
   private String clusterNamePropertyId = null;
   private String hostNamePropertyId = null;
   private String componentNamePropertyId = null;
-  private String jmxStatePropertyId = null;
+  private String resourceStatePropertyId = null;
   private ComponentSSLConfiguration sslConfig = null;
   private StreamProvider streamProvider = null;
   private JMXHostProvider jmxHostProvider;
   private GangliaHostProvider gangliaHostProvider;
   private PropertyProvider defaultJmx = null;
   private PropertyProvider defaultGanglia = null;
-  
+
+  private final MetricsHostProvider metricsHostProvider;
+
+  /**
+   * PropertyHelper/AbstractPropertyProvider expect map of maps,
+   * that's why we wrap metrics into map
+   */
+  public static final String WRAPPED_METRICS_KEY = "WRAPPED_METRICS_KEY";
+
   @Inject
   public static void init(Injector injector) {
     clusters = injector.getInstance(Clusters.class);
     metaInfo = injector.getInstance(AmbariMetaInfo.class);
+    StackDefinedPropertyProvider.injector = injector;
   }
-  
+
   public StackDefinedPropertyProvider(Resource.Type type,
       JMXHostProvider jmxHostProvider,
       GangliaHostProvider gangliaHostProvider,
+      MetricsHostProvider metricsHostProvider,
       StreamProvider streamProvider,
       String clusterPropertyId,
       String hostPropertyId,
       String componentPropertyId,
-      String jmxStatePropertyId,
+      String resourceStatePropertyId,
       PropertyProvider defaultJmxPropertyProvider,
       PropertyProvider defaultGangliaPropertyProvider
-      ) {
-    
+  ) {
+
+    this.metricsHostProvider = metricsHostProvider;
+
     if (null == clusterPropertyId)
       throw new NullPointerException("Cluster name property id cannot be null");
     if (null == componentPropertyId)
       throw new NullPointerException("Component name property id cannot be null");
-    
+
     this.type = type;
-    
+
     clusterNamePropertyId = clusterPropertyId;
     hostNamePropertyId = hostPropertyId;
     componentNamePropertyId = componentPropertyId;
-    this.jmxStatePropertyId = jmxStatePropertyId;
+    this.resourceStatePropertyId = resourceStatePropertyId;
     this.jmxHostProvider = jmxHostProvider;
     this.gangliaHostProvider = gangliaHostProvider;
     sslConfig = ComponentSSLConfiguration.instance();
@@ -114,34 +129,34 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
     defaultJmx = defaultJmxPropertyProvider;
     defaultGanglia = defaultGangliaPropertyProvider;
   }
-      
-  
+
+
   @Override
   public Set<Resource> populateResources(Set<Resource> resources,
       Request request, Predicate predicate) throws SystemException {
 
     // only arrange for one instance of Ganglia and JMX instantiation
-    Map<String, Map<String, PropertyInfo>> gangliaMap = new HashMap<String, Map<String,PropertyInfo>>();
+    Map<String, Map<String, PropertyInfo>> gangliaMap = new HashMap<String, Map<String, PropertyInfo>>();
     Map<String, Map<String, PropertyInfo>> jmxMap = new HashMap<String, Map<String, PropertyInfo>>();
 
     List<PropertyProvider> additional = new ArrayList<PropertyProvider>();
-    
+
     try {
       for (Resource r : resources) {
         String clusterName = r.getPropertyValue(clusterNamePropertyId).toString();
         String componentName = r.getPropertyValue(componentNamePropertyId).toString();
-        
+
         Cluster cluster = clusters.getCluster(clusterName);
         StackId stack = cluster.getDesiredStackVersion();
         String svc = metaInfo.getComponentToService(stack.getStackName(),
             stack.getStackVersion(), componentName);
-        
+
         List<MetricDefinition> defs = metaInfo.getMetrics(
             stack.getStackName(), stack.getStackVersion(), svc, componentName, type.name());
-        
+
         if (null == defs || 0 == defs.size())
           continue;
-        
+
         for (MetricDefinition m : defs) {
           if (m.getType().equals("ganglia")) {
             gangliaMap.put(componentName, getPropertyInfo(m));
@@ -149,12 +164,20 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
             jmxMap.put(componentName, getPropertyInfo(m));
           } else {
             PropertyProvider pp = getDelegate(m);
-            if (null != pp)
+            if(pp == null) {
+              pp = getDelegate(m,
+                  streamProvider, metricsHostProvider,
+                  clusterNamePropertyId, hostNamePropertyId,
+                  componentNamePropertyId, resourceStatePropertyId);
+            }
+            if(pp != null) {
               additional.add(pp);
+            }
+
           }
         }
       }
-        
+
       if (gangliaMap.size() > 0) {
         GangliaPropertyProvider gpp = type.equals (Resource.Type.Component) ?
           new GangliaComponentPropertyProvider(gangliaMap,
@@ -163,22 +186,23 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
           new GangliaHostComponentPropertyProvider(gangliaMap,
               streamProvider, sslConfig, gangliaHostProvider,
               clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
-          
+
           gpp.populateResources(resources, request, predicate);
       } else {
         defaultGanglia.populateResources(resources, request, predicate);
       }
-      
+
       if (jmxMap.size() > 0) {
         JMXPropertyProvider jpp = new JMXPropertyProvider(jmxMap, streamProvider,
-            jmxHostProvider, clusterNamePropertyId, hostNamePropertyId,
-            componentNamePropertyId, jmxStatePropertyId, Collections.singleton("STARTED"));
-        
+            jmxHostProvider, metricsHostProvider,
+            clusterNamePropertyId, hostNamePropertyId,
+            componentNamePropertyId, resourceStatePropertyId);
+
         jpp.populateResources(resources, request, predicate);
       } else {
         defaultJmx.populateResources(resources, request, predicate);
       }
-      
+
       for (PropertyProvider pp : additional) {
         pp.populateResources(resources, request, predicate);
       }
@@ -187,7 +211,7 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
       e.printStackTrace();
       throw new SystemException("Error loading deferred resources", e);
     }
-    
+
     return resources;
   }
 
@@ -195,59 +219,134 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
   public Set<String> checkPropertyIds(Set<String> propertyIds) {
     return Collections.emptySet();
   }
-  
+
   /**
    * @param def the metric definition
-   * @return the converted Map required for JMX or Ganglia execution
+   * @return the converted Map required for JMX or Ganglia execution.
+   * Format: <metric name, property info>
    */
-  private  Map<String, PropertyInfo> getPropertyInfo(MetricDefinition def) {
+  private Map<String, PropertyInfo> getPropertyInfo(MetricDefinition def) {
     Map<String, PropertyInfo> defs = new HashMap<String, PropertyInfo>();
-    
-    for (Entry<String,Metric> entry : def.getMetrics().entrySet()) {
+
+    for (Entry<String, Metric> entry : def.getMetrics().entrySet()) {
       Metric metric = entry.getValue();
       defs.put(entry.getKey(), new PropertyInfo(
           metric.getName(), metric.isTemporal(), metric.isPointInTime()));
     }
-    
+
     return defs;
   }
-  
+
   /**
-   * @param the metric definition for a component and resource type combination
+   * @param definition metric definition for a component and resource type combination
    * @return the custom property provider
    */
   private PropertyProvider getDelegate(MetricDefinition definition) {
+    try {
+      Class<?> clz = Class.forName(definition.getType());
+
+      // singleton/factory
       try {
-        Class<?> clz = Class.forName(definition.getType());
-
-        // singleton/factory
-        try {
-          Method m = clz.getMethod("getInstance", Map.class, Map.class);
-          Object o = m.invoke(null, definition.getProperties(), definition.getMetrics());
-          return PropertyProvider.class.cast(o);
-        } catch (Exception e) {
-          LOG.info("Could not load singleton or factory method for type '" +
-              definition.getType());
-        }
-        
-        // try maps constructor        
-        try {
-          Constructor<?> ct = clz.getConstructor(Map.class, Map.class);
-          Object o = ct.newInstance(definition.getProperties(), definition.getMetrics());
-          return PropertyProvider.class.cast(o);
-        } catch (Exception e) {
-          LOG.info("Could not find contructor for type '" +
-              definition.getType());
-        }
-        
-        // just new instance
-        return PropertyProvider.class.cast(clz.newInstance());
+        Method m = clz.getMethod("getInstance", Map.class, Map.class);
+        Object o = m.invoke(null, definition.getProperties(), definition.getMetrics());
+        return PropertyProvider.class.cast(o);
+      } catch (Exception e) {
+        LOG.info("Could not load singleton or factory method for type '" +
+            definition.getType());
+      }
+
+      // try maps constructor
+      try {
+        Constructor<?> ct = clz.getConstructor(Map.class, Map.class);
+        Object o = ct.newInstance(definition.getProperties(), definition.getMetrics());
+        return PropertyProvider.class.cast(o);
+      } catch (Exception e) {
+        LOG.info("Could not find contructor for type '" +
+            definition.getType());
+      }
+
+      // just new instance
+      return PropertyProvider.class.cast(clz.newInstance());
+
+    } catch (Exception e) {
+      LOG.error("Could not load class " + definition.getType());
+      return null;
+    }
+  }
+
+  /**
+   *
+   * @param definition the metric definition for a component
+   * @param streamProvider the stream provider
+   * @param metricsHostProvider the metrics host provider
+   * @param clusterNamePropertyId the cluster name property id
+   * @param hostNamePropertyId the host name property id
+   * @param componentNamePropertyId the component name property id
+   * @param statePropertyId the state property id
+   * @return the custom property provider
+   */
+
+  private PropertyProvider getDelegate(MetricDefinition definition,
+                                       StreamProvider streamProvider,
+                                       MetricsHostProvider metricsHostProvider,
+                                       String clusterNamePropertyId,
+                                       String hostNamePropertyId,
+                                       String componentNamePropertyId,
+                                       String statePropertyId) {
+    Map<String, PropertyInfo> metrics = getPropertyInfo(definition);
+    HashMap<String, Map<String, PropertyInfo>> componentMetrics =
+        new HashMap<String, Map<String, PropertyInfo>>();
+    componentMetrics.put(WRAPPED_METRICS_KEY, metrics);
+
+    try {
+      Class<?> clz = Class.forName(definition.getType());
+      // singleton/factory
+      try {
+                /*
+         * Interface for singleton/factory method invocation TBD
+         * when implementing the first real use
+         */
+        Method m = clz.getMethod("getInstance", Map.class, Map.class);
+        Object o = m.invoke(
+            definition.getProperties(), componentMetrics,
+            streamProvider, clusterNamePropertyId, hostNamePropertyId,
+            componentNamePropertyId, statePropertyId);
+        return PropertyProvider.class.cast(o);
+      } catch (Exception e) {
+        LOG.info("Could not load singleton or factory method for type '" +
+            definition.getType());
+      }
 
+      // try maps constructor
+      try {
+                /*
+         * Warning: this branch is already used, that's why please adjust
+         * all implementations when modifying constructor interface
+         */
+        Constructor<?> ct = clz.getConstructor(Injector.class, Map.class,
+            Map.class, StreamProvider.class, MetricsHostProvider.class,
+            String.class, String.class, String.class, String.class);
+        Object o = ct.newInstance(
+            injector,
+            definition.getProperties(), componentMetrics,
+            streamProvider, metricsHostProvider,
+            clusterNamePropertyId, hostNamePropertyId,
+            componentNamePropertyId, statePropertyId);
+        return PropertyProvider.class.cast(o);
       } catch (Exception e) {
-        LOG.error("Could not load class " + definition.getType());
-        return null;
+        LOG.info("Could not find contructor for type '" +
+            definition.getType());
       }
+
+      // just new instance
+      return PropertyProvider.class.cast(clz.newInstance());
+
+    } catch (Exception e) {
+      LOG.error("Could not load class " + definition.getType());
+      return null;
+    }
+
+
   }
-  
 
 }

+ 0 - 13
ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java

@@ -26,19 +26,6 @@ import java.util.Set;
  */
 public interface JMXHostProvider {
 
-  /**
-   * Get the JMX host name for the given cluster name and component name.
-   *
-   * @param clusterName    the cluster name
-   * @param componentName  the component name
-   *
-   * @return the JMX host name
-   *
-   * @throws SystemException if unable to get the JMX host name
-   */
-  public String getHostName(String clusterName, String componentName)
-      throws SystemException;
-
   /**
    * Get the JMX host names for the given cluster name and component name.
    *

+ 14 - 260
ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java

@@ -27,20 +27,12 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
 import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.metrics.MetricsHostProvider;
+import org.apache.ambari.server.controller.metrics.MetricsProvider;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
@@ -57,39 +49,11 @@ import org.slf4j.LoggerFactory;
 /**
  * Property provider implementation for JMX sources.
  */
-public class JMXPropertyProvider extends AbstractPropertyProvider {
+public class JMXPropertyProvider extends MetricsProvider {
 
   private static final String NAME_KEY = "name";
   private static final String PORT_KEY = "tag.port";
   private static final String DOT_REPLACEMENT_CHAR = "#";
-  private static final long DEFAULT_POPULATE_TIMEOUT_MILLIS = 12000L;
-
-  public static final String TIMED_OUT_MSG = "Timed out waiting for JMX metrics.";
-  public static final String STORM_REST_API = "STORM_REST_API";
-
-  /**
-   * Thread pool
-   */
-  private static final ExecutorService EXECUTOR_SERVICE;
-  private static final int THREAD_POOL_CORE_SIZE = 20;
-  private static final int THREAD_POOL_MAX_SIZE = 100;
-  private static final long THREAD_POOL_TIMEOUT_MILLIS = 30000L;
-
-  static {
-    LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(); // unlimited Queue
-
-    ThreadPoolExecutor threadPoolExecutor =
-        new ThreadPoolExecutor(
-            THREAD_POOL_CORE_SIZE,
-            THREAD_POOL_MAX_SIZE,
-            THREAD_POOL_TIMEOUT_MILLIS,
-            TimeUnit.MILLISECONDS,
-            queue);
-
-    threadPoolExecutor.allowCoreThreadTimeOut(true);
-
-    EXECUTOR_SERVICE = threadPoolExecutor;
-  }
 
   private final static ObjectReader jmxObjectReader;
   private final static ObjectReader stormObjectReader;
@@ -138,16 +102,6 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
 
   private final String statePropertyId;
 
-  private final Set<String> healthyStates;
-
-  /**
-   * The amount of time that this provider will wait for JMX metric values to be
-   * returned from the JMX sources.  If no results are returned for this amount of
-   * time then the request to populate the resources will fail.
-   */
-  protected long populateTimeout = DEFAULT_POPULATE_TIMEOUT_MILLIS;
-
-
   // ----- Constructors ------------------------------------------------------
 
   /**
@@ -155,23 +109,23 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
    *
    * @param componentMetrics         the map of supported metrics
    * @param streamProvider           the stream provider
-   * @param jmxHostProvider          the host mapping
+   * @param jmxHostProvider          the JMX host mapping
+   * @param metricsHostProvider      the host mapping
    * @param clusterNamePropertyId    the cluster name property id
    * @param hostNamePropertyId       the host name property id
    * @param componentNamePropertyId  the component name property id
    * @param statePropertyId          the state property id
-   * @param healthyStates            the set of healthy state values
    */
   public JMXPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics,
                              StreamProvider streamProvider,
                              JMXHostProvider jmxHostProvider,
+                             MetricsHostProvider metricsHostProvider,
                              String clusterNamePropertyId,
                              String hostNamePropertyId,
                              String componentNamePropertyId,
-                             String statePropertyId,
-                             Set<String> healthyStates) {
+                             String statePropertyId) {
 
-    super(componentMetrics);
+    super(componentMetrics, hostNamePropertyId, metricsHostProvider);
 
     this.streamProvider           = streamProvider;
     this.jmxHostProvider          = jmxHostProvider;
@@ -179,115 +133,10 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
     this.hostNamePropertyId       = hostNamePropertyId;
     this.componentNamePropertyId  = componentNamePropertyId;
     this.statePropertyId          = statePropertyId;
-    this.healthyStates            = healthyStates;
   }
-  
-  // ----- PropertyProvider --------------------------------------------------
-
-  @Override
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
-      throws SystemException {
-
-    // Get a valid ticket for the request.
-    Ticket ticket = new Ticket();
-
-    CompletionService<Resource> completionService =
-        new ExecutorCompletionService<Resource>(EXECUTOR_SERVICE);
-
-    // In a large cluster we could have thousands of resources to populate here.
-    // Distribute the work across multiple threads.
-    for (Resource resource : resources) {
-      completionService.submit(getPopulateResourceCallable(resource, request, predicate, ticket));
-    }
-
-    Set<Resource> keepers = new HashSet<Resource>();
-    try {
-      for (int i = 0; i < resources.size(); ++ i) {
-        Future<Resource> resourceFuture =
-            completionService.poll(populateTimeout, TimeUnit.MILLISECONDS);
-
-        if (resourceFuture == null) {
-          // its been more than the populateTimeout since the last callable completed ...
-          // invalidate the ticket to abort the threads and don't wait any longer
-          ticket.invalidate();
-          LOG.error(TIMED_OUT_MSG);
-          break;
-        } else {
-          // future should already be completed... no need to wait on get
-          Resource resource = resourceFuture.get();
-          if (resource != null) {
-            keepers.add(resource);
-          }
-        }
-      }
-    } catch (InterruptedException e) {
-      logException(e);
-    } catch (ExecutionException e) {
-      rethrowSystemException(e.getCause());
-    }
-    return keepers;
-  }
-
 
   // ----- helper methods ----------------------------------------------------
 
-  /**
-   * Set the populate timeout value for this provider.
-   *
-   * @param populateTimeout  the populate timeout value
-   */
-  protected void setPopulateTimeout(long populateTimeout) {
-    this.populateTimeout = populateTimeout;
-  }
-
-  /**
-   * Get the spec to locate the JMX stream from the given host and port
-   *
-   * @param protocol  the protocol, one of http or https
-   * @param hostName  the host name
-   * @param port      the port
-   *
-   * @return the spec
-   */
-  protected String getSpec(String protocol, String hostName,
-                           String port, String componentName) {
-      if (null == componentName || !componentName.equals(STORM_REST_API))
-        return protocol + "://" + hostName + ":" + port + "/jmx";
-      else
-        return protocol + "://" + hostName + ":" + port + "/api/cluster/summary";
-  }
-
-  /**
-   * Get the spec to locate the JMX stream from the given host and port
-   *
-   * @param hostName  the host name
-   * @param port      the port
-   *
-   * @return the spec
-   */
-  protected String getSpec(String hostName, String port) {
-      return getSpec("http", hostName, port, null);
-  }
-  
-  /**
-   * Get a callable that can be used to populate the given resource.
-   *
-   * @param resource  the resource to be populated
-   * @param request   the request
-   * @param predicate the predicate
-   * @param ticket    a valid ticket
-   *
-   * @return a callable that can be used to populate the given resource
-   */
-  private Callable<Resource> getPopulateResourceCallable(
-      final Resource resource, final Request request, final Predicate predicate, final Ticket ticket) {
-    return new Callable<Resource>() {
-      public Resource call() throws SystemException {
-        return populateResource(resource, request, predicate, ticket);
-      }
-    };
-  }
-
   /**
    * Populate a resource by obtaining the requested JMX properties.
    *
@@ -298,7 +147,8 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
    *
    * @return the populated resource; null if the resource should NOT be part of the result set for the given predicate
    */
-  private Resource populateResource(Resource resource, Request request, Predicate predicate, Ticket ticket)
+  @Override
+  protected Resource populateResource(Resource resource, Request request, Predicate predicate, MetricsProvider.Ticket ticket)
       throws SystemException {
 
     Set<String> ids = getRequestPropertyIds(request, predicate);
@@ -354,16 +204,14 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
       try {
         for (String hostName : hostNames) {
           try {
-            in = streamProvider.readFrom(getSpec(protocol, hostName, port, componentName));
+            in = streamProvider.readFrom(getSpec(protocol, hostName, port, "/jmx"));
             // if the ticket becomes invalid (timeout) then bail out
             if (!ticket.isValid()) {
               return resource;
             }
-            if (null == componentName || !componentName.equals(STORM_REST_API)) {
-              getHadoopMetricValue(in, ids, resource, request, ticket);
-            } else {
-              getStormMetricValue(in, ids, resource, ticket);
-            }
+
+            getHadoopMetricValue(in, ids, resource, request, ticket);
+
           } catch (IOException e) {
             logException(e);
           }
@@ -470,31 +318,6 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
     }
   }
 
-  /**
-   * TODO: Refactor
-   * Storm-specific metrics fetching
-   */
-  private void getStormMetricValue(InputStream in, Set<String> ids,
-                                   Resource resource, Ticket ticket) throws IOException {
-    HashMap<String, Object> metricHolder = stormObjectReader.readValue(in);
-    for (String category : ids) {
-      Map<String, PropertyInfo> defProps = getComponentMetrics().get(STORM_REST_API);
-      for (Map.Entry<String, PropertyInfo> depEntry : defProps.entrySet()) {
-        if (depEntry.getKey().startsWith(category)) {
-          PropertyInfo propInfo = depEntry.getValue();
-          String propName = propInfo.getPropertyId();
-          Object propertyValue = metricHolder.get(propName);
-          String absId = PropertyHelper.getPropertyId(category, propName);
-          if (!ticket.isValid()) {
-            return;
-          }
-          // TODO: Maybe cast to int
-          resource.setProperty(absId, propertyValue);
-        }
-      }
-    }
-  }
-
   private void setResourceValue(Resource resource, Map<String, Map<String, Object>> categories, String propertyId,
                                 String category, String property, List<String> keyList) {
     Map<String, Object> properties = categories.get(category);
@@ -546,73 +369,4 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
     }
     return null;
   }
-
-  /**
-   * Determine whether or not the given property id was requested.
-   */
-  private static boolean isRequestedPropertyId(String propertyId, String requestedPropertyId, Request request) {
-    return request.getPropertyIds().isEmpty() || propertyId.startsWith(requestedPropertyId);
-  }
-
-  /**
-   * Log an error for the given exception.
-   *
-   * @param throwable  the caught exception
-   *
-   * @return the error message that was logged
-   */
-  private static String logException(Throwable throwable) {
-    String msg = "Caught exception getting JMX metrics : " + throwable.getLocalizedMessage();
-
-    LOG.debug(msg, throwable);
-
-    return msg;
-  }
-
-  /**
-   * Rethrow the given exception as a System exception and log the message.
-   *
-   * @param throwable  the caught exception
-   *
-   * @throws org.apache.ambari.server.controller.spi.SystemException always around the given exception
-   */
-  private static void rethrowSystemException(Throwable throwable) throws SystemException {
-    String msg = logException(throwable);
-
-    if (throwable instanceof SystemException) {
-      throw (SystemException) throwable;
-    }
-    throw new SystemException (msg, throwable);
-  }
-
-
-  // ----- inner class : Ticket ----------------------------------------------
-
-  /**
-   * Ticket used to cancel provider threads.  The provider threads should
-   * monitor the validity of the passed in ticket and bail out if it becomes
-   * invalid (as in a timeout).
-   */
-  private static class Ticket {
-    /**
-     * Indicate whether or not the ticket is valid.
-     */
-    private volatile boolean valid = true;
-
-    /**
-     * Invalidate the ticket.
-     */
-    public void invalidate() {
-      valid = false;
-    }
-
-    /**
-     * Determine whether or not this ticket is valid.
-     *
-     * @return true if the ticket is valid
-     */
-    public boolean isValid() {
-      return valid;
-    }
-  }
 }

+ 38 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsHostProvider.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.metrics;
+
+
+import org.apache.ambari.server.controller.spi.SystemException;
+
+public interface MetricsHostProvider {
+
+  /**
+   * Get the host name for the given cluster name and component name.
+   *
+   * @param clusterName   the cluster name
+   * @param componentName the component name
+   * @return the host name
+   * @throws org.apache.ambari.server.controller.spi.SystemException
+   *          if unable to get the host name
+   */
+  public String getHostName(String clusterName, String componentName)
+      throws SystemException;
+
+}

+ 302 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsProvider.java

@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.metrics;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+
+import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
+import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+
+/**
+ * Unites common functionality for multithreaded metrics providers
+ * (JMX and REST as of now). Shares the same pool of executor threads.
+ */
+public abstract class MetricsProvider extends AbstractPropertyProvider {
+
+  /**
+   * Host states in which metrics collection is available
+   */
+  public static final Set<String> healthyStates = Collections.singleton("STARTED");
+  protected final String hostNamePropertyId;
+  private final MetricsHostProvider metricsHostProvider;
+
+  /**
+   * Executor service is shared between all children of the current class
+   */
+  private static final ExecutorService EXECUTOR_SERVICE = initExecutorService();
+  private static final int THREAD_POOL_CORE_SIZE = 20;
+  private static final int THREAD_POOL_MAX_SIZE = 100;
+  private static final long THREAD_POOL_TIMEOUT_MILLIS = 30000L;
+
+  private static final long DEFAULT_POPULATE_TIMEOUT_MILLIS = 10000L;
+  /**
+   * The amount of time that this provider will wait for metric values to be
+   * returned from the metrics sources.  If no results are returned for this amount
+   * of time then the request to populate the resources will fail.
+   */
+  protected long populateTimeout = DEFAULT_POPULATE_TIMEOUT_MILLIS;
+  public static final String TIMED_OUT_MSG = "Timed out waiting for metrics.";
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a provider.
+   *
+   * @param componentMetrics map of metrics for this provider
+   */
+  public MetricsProvider(Map<String, Map<String, PropertyInfo>> componentMetrics,
+                         String hostNamePropertyId,
+                         MetricsHostProvider metricsHostProvider) {
+    super(componentMetrics);
+    this.hostNamePropertyId = hostNamePropertyId;
+    this.metricsHostProvider = metricsHostProvider;
+  }
+
+  // ----- Thread pool -------------------------------------------------------
+
+  /**
+   * Generates thread pool with default parameters
+   */
+
+
+  private static ExecutorService initExecutorService() {
+    LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(); // unlimited Queue
+
+    ThreadPoolExecutor threadPoolExecutor =
+        new ThreadPoolExecutor(
+            THREAD_POOL_CORE_SIZE,
+            THREAD_POOL_MAX_SIZE,
+            THREAD_POOL_TIMEOUT_MILLIS,
+            TimeUnit.MILLISECONDS,
+            queue);
+
+    threadPoolExecutor.allowCoreThreadTimeOut(true);
+
+    return threadPoolExecutor;
+  }
+
+  public static ExecutorService getExecutorService() {
+    return EXECUTOR_SERVICE;
+  }
+
+  // ----- Common PropertyProvider implementation details --------------------
+
+  @Override
+  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
+      throws SystemException {
+
+    // Get a valid ticket for the request.
+    Ticket ticket = new Ticket();
+
+    CompletionService<Resource> completionService =
+        new ExecutorCompletionService<Resource>(EXECUTOR_SERVICE);
+
+    // In a large cluster we could have thousands of resources to populate here.
+    // Distribute the work across multiple threads.
+    for (Resource resource : resources) {
+      completionService.submit(getPopulateResourceCallable(resource, request, predicate, ticket));
+    }
+
+    Set<Resource> keepers = new HashSet<Resource>();
+    try {
+      for (int i = 0; i < resources.size(); ++ i) {
+        Future<Resource> resourceFuture =
+            completionService.poll(populateTimeout, TimeUnit.MILLISECONDS);
+
+        if (resourceFuture == null) {
+          // its been more than the populateTimeout since the last callable completed ...
+          // invalidate the ticket to abort the threads and don't wait any longer
+          ticket.invalidate();
+          LOG.error(TIMED_OUT_MSG);
+          break;
+        } else {
+          // future should already be completed... no need to wait on get
+          Resource resource = resourceFuture.get();
+          if (resource != null) {
+            keepers.add(resource);
+          }
+        }
+      }
+    } catch (InterruptedException e) {
+      logException(e);
+    } catch (ExecutionException e) {
+      rethrowSystemException(e.getCause());
+    }
+    return keepers;
+  }
+
+  /**
+   * Get a callable that can be used to populate the given resource.
+   *
+   * @param resource  the resource to be populated
+   * @param request   the request
+   * @param predicate the predicate
+   * @param ticket    a valid ticket
+   *
+   * @return a callable that can be used to populate the given resource
+   */
+  private Callable<Resource> getPopulateResourceCallable(
+      final Resource resource, final Request request, final Predicate predicate, final Ticket ticket) {
+    return new Callable<Resource>() {
+      public Resource call() throws SystemException {
+        return populateResource(resource, request, predicate, ticket);
+      }
+    };
+  }
+
+
+  /**
+   * Populate a resource by obtaining the requested metric properties.
+   *
+   * @param resource  the resource to be populated
+   * @param request   the request
+   * @param predicate the predicate
+   * @return the populated resource; null if the resource should NOT be part of the result set for the given predicate
+   */
+
+
+  protected abstract Resource populateResource(Resource resource,
+                                               Request request, Predicate predicate, Ticket ticket)
+
+      throws SystemException;
+
+  /**
+   * Set the populate timeout value for this provider.
+   *
+   * @param populateTimeout the populate timeout value
+   */
+
+
+  protected void setPopulateTimeout(long populateTimeout) {
+    this.populateTimeout = populateTimeout;
+
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Determine whether or not the given property id was requested.
+   */
+  protected static boolean isRequestedPropertyId(String propertyId, String requestedPropertyId, Request request) {
+    return request.getPropertyIds().isEmpty() || propertyId.startsWith(requestedPropertyId);
+  }
+
+  /**
+   * Log an error for the given exception.
+   *
+   * @param throwable  the caught exception
+   *
+   * @return the error message that was logged
+   */
+  protected static String logException(Throwable throwable) {
+    String msg = "Caught exception getting JMX metrics : " + throwable.getLocalizedMessage();
+
+    LOG.debug(msg, throwable);
+
+    return msg;
+  }
+
+  /**
+   * Rethrow the given exception as a System exception and log the message.
+   *
+   * @param throwable  the caught exception
+   *
+   * @throws org.apache.ambari.server.controller.spi.SystemException always around the given exception
+   */
+  protected static void rethrowSystemException(Throwable throwable) throws SystemException {
+    String msg = logException(throwable);
+
+    if (throwable instanceof SystemException) {
+      throw (SystemException) throwable;
+    }
+    throw new SystemException (msg, throwable);
+  }
+
+  /**
+   * Returns a hostname for component
+   */
+
+
+  public String getHost(Resource resource, String clusterName, String componentName) throws SystemException {
+    return hostNamePropertyId == null ?
+        metricsHostProvider.getHostName(clusterName, componentName) :
+        (String) resource.getPropertyValue(hostNamePropertyId);
+
+  }
+
+
+  /**
+   * Get complete URL from parts
+   */
+
+  protected String getSpec(String protocol, String hostName,
+                           String port, String url) {
+    return protocol + "://" + hostName + ":" + port + url;
+
+  }
+
+  // ----- inner class : Ticket ----------------------------------------------
+
+  /**
+   * Ticket used to cancel provider threads.  The provider threads should
+   * monitor the validity of the passed in ticket and bail out if it becomes
+   * invalid (as in a timeout).
+   */
+  protected static class Ticket {
+    /**
+     * Indicate whether or not the ticket is valid.
+     */
+    private volatile boolean valid = true;
+
+    /**
+     * Invalidate the ticket.
+     */
+    public void invalidate() {
+      valid = false;
+    }
+
+    /**
+     * Determine whether or not this ticket is valid.
+     *
+     * @return true if the ticket is valid
+     */
+    public boolean isValid() {
+      return valid;
+    }
+  }
+
+}

+ 448 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/RestMetricsPropertyProvider.java

@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.ambari.server.controller.metrics;
+
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
+import com.google.gson.reflect.TypeToken;
+import com.google.gson.stream.JsonReader;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.internal.StackDefinedPropertyProvider;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.utilities.StreamProvider;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Hashtable;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * WARNING: Class should be thread-safe!
+ * <p/>
+ * Resolves metrics like api/cluster/summary/nimbus.uptime
+ * For every metric, finds a relevant JSON value and returns it as
+ * a resource property.
+ */
+public class RestMetricsPropertyProvider extends MetricsProvider {
+
+  protected final static Logger LOG =
+      LoggerFactory.getLogger(RestMetricsPropertyProvider.class);
+
+  private static Map<String, RestMetricsPropertyProvider> instances =
+      new Hashtable<String, RestMetricsPropertyProvider>();
+
+  @Inject
+  private AmbariManagementController amc;
+
+  @Inject
+  private Clusters clusters;
+
+  private final Map<String, String> metricsProperties;
+  private final StreamProvider streamProvider;
+  private final String clusterNamePropertyId;
+  private final String componentNamePropertyId;
+  private final String statePropertyId;
+  private MetricsHostProvider metricsHostProvider;
+
+  private static final String DEFAULT_PORT_PROPERTY = "default_port";
+  private static final String PORT_CONFIG_TYPE_PROPERTY = "port_config_type";
+  private static final String PORT_PROPERTY_NAME_PROPERTY = "port_property_name";
+
+  /**
+   * Protocol to use when connecting
+   */
+  private static final String PROTOCOL_OVERRIDE_PROPERTY = "protocol";
+  private static final String HTTP_PROTOCOL = "http";
+  private static final String HTTPS_PROTOCOL = "https";
+  private static final String DEFAULT_PROTOCOL = HTTP_PROTOCOL;
+
+
+  /**
+   * String that separates JSON URL from path inside JSON in metrics path
+   */
+  public static final String URL_PATH_SEPARATOR = "##";
+
+  /**
+   * Symbol that separates names of nested JSON sections in metrics path
+   */
+  public static final String DOCUMENT_PATH_SEPARATOR = "#";
+
+
+  /**
+   * Create a REST property provider.
+   *
+   * @param injector                injector used to populate the {@code @Inject}
+   *                                fields ({@code amc}, {@code clusters})
+   * @param metricsProperties       the map of per-component metrics properties
+   * @param componentMetrics        the map of supported metrics for component
+   * @param streamProvider          the stream provider
+   * @param metricsHostProvider     metricsHostProvider instance
+   * @param clusterNamePropertyId   the cluster name property id
+   * @param hostNamePropertyId      the host name property id
+   * @param componentNamePropertyId the component name property id
+   * @param statePropertyId         the state property id
+   */
+  public RestMetricsPropertyProvider(
+      Injector injector,
+      Map<String, String> metricsProperties,
+      Map<String, Map<String, PropertyInfo>> componentMetrics,
+      StreamProvider streamProvider,
+      MetricsHostProvider metricsHostProvider,
+      String clusterNamePropertyId,
+      String hostNamePropertyId,
+      String componentNamePropertyId,
+      String statePropertyId) {
+
+    super(componentMetrics, hostNamePropertyId, metricsHostProvider);
+    this.metricsProperties = metricsProperties;
+    this.streamProvider = streamProvider;
+    this.clusterNamePropertyId = clusterNamePropertyId;
+    this.componentNamePropertyId = componentNamePropertyId;
+    this.statePropertyId = statePropertyId;
+    this.metricsHostProvider = metricsHostProvider;
+    // Populates the @Inject fields (amc, clusters).
+    // NOTE(review): this hands a not-yet-fully-constructed 'this' to the
+    // injector; safe only while injectMembers() is synchronous — confirm.
+    injector.injectMembers(this);
+  }
+
+  // ----- MetricsProvider implementation ------------------------------------
+
+
+  /**
+   * Populate a resource by obtaining the requested REST properties.
+   *
+   * @param resource  the resource to be populated
+   * @param request   the request
+   * @param predicate the predicate
+   * @param ticket    cancellation ticket; checked after each blocking read so
+   *                  a timed-out request stops fetching further values
+   *
+   * @return the populated resource; null if the resource should NOT be
+   *         part of the result set for the given predicate
+   *
+   * @throws SystemException if cluster/host/port resolution fails
+   */
+  @Override
+  protected Resource populateResource(Resource resource,
+                                      Request request, Predicate predicate, Ticket ticket)
+      throws SystemException {
+
+    // Remove request properties that request temporal information
+    // (this provider only serves point-in-time values).
+    Set<String> ids = getRequestPropertyIds(request, predicate);
+    Set<String> temporalIds = new HashSet<String>();
+    for (String id : ids) {
+      if (request.getTemporalInfo(id) != null) {
+        temporalIds.add(id);
+      }
+    }
+    ids.removeAll(temporalIds);
+
+    if (ids.isEmpty()) {
+      // no properties requested
+      return resource;
+    }
+
+    // Don't attempt to get REST properties if the resource is in
+    // an unhealthy state
+    if (statePropertyId != null) {
+      String state = (String) resource.getPropertyValue(statePropertyId);
+      if (state != null && !healthyStates.contains(state)) {
+        return resource;
+      }
+    }
+
+    String componentName = (String) resource.getPropertyValue(componentNamePropertyId);
+
+    Map<String, PropertyInfo> propertyInfos =
+        getComponentMetrics().get(StackDefinedPropertyProvider.WRAPPED_METRICS_KEY);
+    if (propertyInfos == null) {
+      // If there are no metrics defined for the given component then there is nothing to do.
+      return resource;
+    }
+    String protocol = resolveProtocol();
+    String port = "-1";
+    String hostname = null;
+    try {
+      String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
+      Cluster cluster = clusters.getCluster(clusterName);
+      hostname = getHost(resource, clusterName, componentName);
+      if (hostname == null) {
+        String msg = String.format("Unable to get component REST metrics. " +
+            "No host name for %s.", componentName);
+        LOG.warn(msg);
+        return resource;
+      }
+      port = resolvePort(cluster, hostname, componentName, metricsProperties);
+    } catch (Exception e) {
+      // Wraps and rethrows as SystemException; resolution failure aborts
+      // population of this resource.
+      rethrowSystemException(e);
+    }
+
+    // Expand requested ids by prefix match against the defined metric ids
+    // (a request for a category id pulls in all metrics underneath it).
+    Set<String> resultIds = new HashSet<String>();
+    for (String id : ids){
+      for (String metricId : propertyInfos.keySet()){
+        if (metricId.startsWith(id)){
+          resultIds.add(metricId);
+        }
+      }
+    }
+
+    // Extract set of URLs for metrics, so each JSON document is fetched once
+    // no matter how many properties it serves.
+    HashMap<String, Set<String>> urls = extractPropertyURLs(resultIds, propertyInfos);
+
+    for (String url : urls.keySet()) {
+      try {
+        InputStream in = streamProvider.readFrom(getSpec(protocol, hostname, port, url));
+        // Bail out (after the blocking read) if the request timed out.
+        if (!ticket.isValid()) {
+          return resource;
+        }
+        try {
+          extractValuesFromJSON(in, urls.get(url), resource, propertyInfos);
+        } finally {
+          in.close();
+        }
+      } catch (IOException e) {
+        // A failed URL is logged and skipped; remaining URLs still populate.
+        logException(e);
+      }
+    }
+    return resource;
+  }
+
+  @Override
+  public Set<String> checkPropertyIds(Set<String> propertyIds) {
+    Set<String> unsupported = new HashSet<String>();
+    for (String propertyId : propertyIds) {
+      if (!getComponentMetrics().
+          get(StackDefinedPropertyProvider.WRAPPED_METRICS_KEY).
+          containsKey(propertyId)) {
+        unsupported.add(propertyId);
+      }
+    }
+    return unsupported;
+  }
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Uses port_config_type, port_property_name, default_port parameters from
+   * metricsProperties to find out right port value for service.  Falls back
+   * to the default port when the configured lookup yields nothing.
+   *
+   * @param cluster           the cluster to read configuration from
+   * @param hostname          host whose configuration overrides apply
+   * @param componentName     the component (used for log/error messages)
+   * @param metricsProperties the per-component metrics properties
+   *
+   * @return the REST port for the service, as a string
+   *
+   * @throws AmbariException if no port can be determined at all
+   */
+  private String resolvePort(Cluster cluster, String hostname, String componentName,
+                          Map<String, String> metricsProperties)
+      throws AmbariException {
+    String portConfigType = null;
+    String portPropertyName = null;
+    if (metricsProperties.containsKey(PORT_CONFIG_TYPE_PROPERTY) &&
+        metricsProperties.containsKey(PORT_PROPERTY_NAME_PROPERTY)) {
+      portConfigType = metricsProperties.get(PORT_CONFIG_TYPE_PROPERTY);
+      portPropertyName = metricsProperties.get(PORT_PROPERTY_NAME_PROPERTY);
+    }
+    String portStr = null;
+    if (portConfigType != null && portPropertyName != null) {
+      try {
+        Map<String, Map<String, String>> configTags =
+            amc.findConfigurationTagsWithOverrides(cluster, hostname);
+        if (configTags.containsKey(portConfigType)) {
+          Map<String, String> config = configTags.get(portConfigType);
+          if (config.containsKey(portPropertyName)) {
+            portStr = config.get(portPropertyName);
+          }
+        }
+      } catch (AmbariException e) {
+        // BUG FIX: message previously labeled the component as "cluster".
+        String message = String.format("Can not extract config tags for " +
+            "component = %s, hostname = %s", componentName, hostname);
+        LOG.warn(message);
+      }
+      if (portStr == null) {
+        String message = String.format(
+            "Can not extract REST port for " +
+                "component %s from configurations. " +
+                "Config tag = %s, config key name = %s, " +
+                "hostname = %s. Probably metrics.json file for " +
+                "service is misspelled. Trying default port",
+            componentName, portConfigType,
+            portPropertyName, hostname);
+        LOG.warn(message);
+      }
+    }
+    if (portStr == null) {
+      // BUG FIX: the outer condition previously also required
+      // containsKey(DEFAULT_PORT_PROPERTY), which made this else-branch
+      // unreachable and allowed a null port to leak to the caller.
+      if (metricsProperties.containsKey(DEFAULT_PORT_PROPERTY)) {
+        portStr = metricsProperties.get(DEFAULT_PORT_PROPERTY);
+      } else {
+        String message = String.format("Can not determine REST port for " +
+            "component %s. " +
+            "Default REST port property %s is not defined at metrics.json " +
+            "file for service, and there is no any other available ways " +
+            "to determine port information.",
+            componentName, DEFAULT_PORT_PROPERTY);
+        throw new AmbariException(message);
+      }
+    }
+    return portStr;
+  }
+
+
+  /**
+   * Extracts protocol type from metrics properties. If no protocol is
+   * defined, or an unsupported one is configured, uses the default protocol.
+   *
+   * @return "http" or "https"
+   */
+  private String resolveProtocol() {
+    if (!metricsProperties.containsKey(PROTOCOL_OVERRIDE_PROPERTY)) {
+      return DEFAULT_PROTOCOL;
+    }
+    String protocol = metricsProperties.get(PROTOCOL_OVERRIDE_PROPERTY).toLowerCase();
+    if (HTTP_PROTOCOL.equals(protocol) || HTTPS_PROTOCOL.equals(protocol)) {
+      return protocol;
+    }
+    // Unknown override: warn and fall back rather than fail.
+    String message = String.format(
+        "Unsupported protocol type %s, falling back to %s",
+        protocol, DEFAULT_PROTOCOL);
+    LOG.warn(message);
+    return DEFAULT_PROTOCOL;
+  }
+
+
+  /**
+   * Extract the JSON document URL portion from a metrics path.
+   *
+   * @param metricsPath  full metrics path ("url##document path")
+   *
+   * @return the URL part of the metrics path
+   *
+   * @throws IllegalArgumentException if the metrics path is malformed
+   */
+  private String extractMetricsURL(String metricsPath)
+      throws IllegalArgumentException {
+    String[] parts = validateAndExtractPathParts(metricsPath);
+    return parts[0];
+  }
+
+  /**
+   * Extract the part of a metrics path that describes the path through
+   * nested JSON sections.
+   *
+   * @param metricsPath  full metrics path ("url##document path")
+   *
+   * @return the document-path part of the metrics path
+   *
+   * @throws IllegalArgumentException if the metrics path is malformed
+   */
+  private String extractDocumentPath(String metricsPath)
+      throws IllegalArgumentException {
+    String[] parts = validateAndExtractPathParts(metricsPath);
+    return parts[1];
+  }
+
+  /**
+   * Split a metrics path of the form "url##documentPath".
+   *
+   * @param metricsPath  the metrics path to split
+   *
+   * @return a two-element array [MetricsURL, DocumentPath]
+   *
+   * @throws IllegalArgumentException if metricsPath does not contain exactly
+   *         one {@link #URL_PATH_SEPARATOR}
+   */
+  private String[] validateAndExtractPathParts(String metricsPath)
+      throws IllegalArgumentException {
+    String[] pathParts = metricsPath.split(URL_PATH_SEPARATOR);
+    if (pathParts.length != 2) {
+      // This warning is expected to occur only on development phase.
+      // BUG FIX: a missing space used to render "...containsmore than one...".
+      String message = String.format(
+          "Metrics path %s does not contain or contains " +
+              "more than one %s sequence. That probably " +
+              "means that the mentioned metrics path is misspelled. " +
+              "Please check the relevant metrics.json file",
+          metricsPath, URL_PATH_SEPARATOR);
+      throw new IllegalArgumentException(message);
+    }
+    return pathParts;
+  }
+
+
+  /**
+   * Build a map of document_url to the set of requested property ids served
+   * by that URL, so each JSON document is fetched and parsed only once even
+   * when several properties come from it.
+   *
+   * @param ids           set of property IDs that should be fetched
+   * @param propertyInfos metric definitions keyed by property id
+   *
+   * @return map from document URL to the property ids it provides
+   */
+  private HashMap<String, Set<String>> extractPropertyURLs(Set<String> ids,
+                                                           Map<String, PropertyInfo> propertyInfos) {
+    HashMap<String, Set<String>> result = new HashMap<String, Set<String>>();
+    for (String requestedPropertyId : ids) {
+      String metricsPath = propertyInfos.get(requestedPropertyId).getPropertyId();
+      String url = extractMetricsURL(metricsPath);
+      Set<String> propertiesForUrl = result.get(url);
+      if (propertiesForUrl == null) {
+        propertiesForUrl = new HashSet<String>();
+        result.put(url, propertiesForUrl);
+      }
+      propertiesForUrl.add(requestedPropertyId);
+    }
+    return result;
+  }
+
+
+  /**
+   * Extracts requested properties from a given JSON input stream into
+   * resource.
+   *
+   * @param jsonStream           input stream that contains JSON
+   * @param requestedPropertyIds a set of property IDs
+   *                             that should be fetched for this URL
+   * @param resource             all extracted values are placed into resource
+   * @param propertyInfos        metric definitions keyed by property id
+   *
+   * @throws IOException if a document-path element is missing from the JSON
+   */
+  private void extractValuesFromJSON(InputStream jsonStream,
+                                     Set<String> requestedPropertyIds,
+                                     Resource resource,
+                                     Map<String, PropertyInfo> propertyInfos)
+      throws IOException {
+    Gson gson = new Gson();
+    // NOTE(review): values deserialized with this token are plain Maps,
+    // Strings and Doubles (Gson's defaults), not JsonElements.
+    Type type = new TypeToken<Map<Object, Object>>() {
+    }.getType();
+    JsonReader jsonReader = new JsonReader(
+        new BufferedReader(new InputStreamReader(jsonStream)));
+    Map<String, String> jsonMap = gson.fromJson(jsonReader, type);
+    for (String requestedPropertyId : requestedPropertyIds) {
+      PropertyInfo propertyInfo = propertyInfos.get(requestedPropertyId);
+      String metricsPath = propertyInfo.getPropertyId();
+      String documentPath = extractDocumentPath(metricsPath);
+      String[] docPath = documentPath.split(DOCUMENT_PATH_SEPARATOR);
+      Map<String, String> subMap = jsonMap;
+      for (int i = 0; i < docPath.length; i++) {
+        String pathElement = docPath[i];
+        if (!subMap.containsKey(pathElement)) {
+          String message = String.format(
+              "Can not fetch %dth element of document path (%s) " +
+                  "from json. Wrong metrics path: %s",
+              i, pathElement, metricsPath);
+          throw new IOException(message);
+        }
+        // BUG FIX: was jsonMap.get(pathElement) — re-reading the document
+        // root at every level, so navigation into nested sections never
+        // actually descended (and contradicted the subMap.containsKey check).
+        Object jsonSubElement = subMap.get(pathElement);
+        if (i == docPath.length - 1) { // Reached target document section
+          // Extract property value
+          resource.setProperty(requestedPropertyId, jsonSubElement);
+        } else { // Navigate to relevant document section.
+          // BUG FIX: jsonSubElement is a deserialized Map, not a JsonElement,
+          // so the former direct (JsonElement) cast could never succeed for
+          // nested sections; round-trip through a tree instead.
+          subMap = gson.fromJson(gson.toJsonTree(jsonSubElement), type);
+        }
+      }
+    }
+  }
+
+}

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java

@@ -1274,7 +1274,7 @@ public class ViewRegistry {
       setViewStatus(viewDefinition, ViewEntity.ViewStatus.DEPLOYED, "Deployed " + extractedArchiveDirPath + ".");
 
     } catch (Exception e) {
-      String msg = "Caught exception loading view " + viewDefinition.getViewName();
+      String msg = "Caught exception loading view " + viewDefinition.getName();
 
       setViewStatus(viewDefinition, ViewEntity.ViewStatus.ERROR, msg + " : " + e.getMessage());
       LOG.error(msg, e);

+ 8 - 0
ambari-server/src/main/package/deb/control/preinst

@@ -14,9 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
+STACKS_FOLDER="/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD=/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+
 if [ -d "/etc/ambari-server/conf.save" ]
 then
     mv /etc/ambari-server/conf.save /etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save
 fi
 
+if [ -d "$STACKS_FOLDER" ]
+then
+    cp -r "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
 exit 0

+ 8 - 0
ambari-server/src/main/package/rpm/preinstall.sh

@@ -13,9 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
+STACKS_FOLDER="/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD=/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+
 if [ -d "/etc/ambari-server/conf.save" ]
 then
     mv /etc/ambari-server/conf.save /etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save
 fi
 
+if [ -d "$STACKS_FOLDER" ]
+then
+    cp -r "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
 exit 0

+ 3 - 3
ambari-server/src/main/python/ambari-server.py

@@ -3057,10 +3057,10 @@ def sync_ldap():
     err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
     raise FatalException(1, err)
 
-  admin_login = get_validated_string_input(prompt="Enter admin login: ", default=None,
+  admin_login = get_validated_string_input(prompt="Enter Ambari Admin login: ", default=None,
                                            pattern=None, description=None,
                                            is_pass=False, allowEmpty=False)
-  admin_password = get_validated_string_input(prompt="Enter admin password: ", default=None,
+  admin_password = get_validated_string_input(prompt="Enter Ambari Admin password: ", default=None,
                                               pattern=None, description=None,
                                               is_pass=True, allowEmpty=False)
 
@@ -3169,7 +3169,7 @@ def get_ldap_event_spec_names(file, specs, new_specs):
     else:
       err = 'Sync event creation failed. File ' + file + ' not found.'
       raise FatalException(1, err)
-  except:
+  except Exception as exception:
       err = 'Caught exception reading file ' + file + ' : ' + str(exception)
       raise FatalException(1, err)
 

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="true">
+<configuration supports_final="false">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>

+ 8 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/yarn.py

@@ -60,6 +60,13 @@ def yarn(name = None):
     )
     params.HdfsDirectory(None, action="create")
 
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(',') + params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True,
+              ignore_failures=True,
+              )
+
   Directory([params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
@@ -71,7 +78,7 @@ def yarn(name = None):
             group=params.user_group,
             recursive=True
   )
-  Directory(params.nm_local_dirs.split(',')+params.nm_log_dirs.split(',')+[params.yarn_log_dir_prefix],
+  Directory([params.yarn_log_dir_prefix],
             owner=params.yarn_user,
             recursive=True,
             ignore_failures=True,

+ 6 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml

@@ -73,6 +73,12 @@
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
   <!-- hadoop-env.sh -->
   <property>
     <name>content</name>

+ 24 - 14
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py

@@ -16,10 +16,31 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os
 
 from resource_management import *
+from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
 from utils import service
-import os
+
+
+def create_dirs(data_dir, params):
+  """
+  :param data_dir: The directory to create
+  :param params: parameters
+  """
+  Directory(os.path.dirname(data_dir),
+            recursive=True,
+            mode=0755,
+            ignore_failures=True
+  )
+  Directory(data_dir,
+            recursive=False,
+            mode=0750,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
 
 def datanode(action=None):
   import params
@@ -30,19 +51,8 @@ def datanode(action=None):
               mode=0751,
               owner=params.hdfs_user,
               group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(os.path.dirname(data_dir),
-                recursive=True,
-                mode=0755,
-                ignore_failures=True
-      )
-      Directory(data_dir,
-                recursive=False,
-                mode=0750,
-                owner=params.hdfs_user,
-                group=params.user_group,
-                ignore_failures=True
-      )
+
+    handle_dfs_data_dir(create_dirs, params)
 
   elif action == "start" or action == "stop":
     service(

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -122,6 +122,8 @@ namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/form
 fs_checkpoint_dir = config['configurations']['core-site']['fs.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml

@@ -22,7 +22,7 @@
 <!-- The properties for a queue follow a naming convention,such as, -->
 <!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
 
-<configuration supports_final="true" supports_adding_forbidden="true">
+<configuration supports_final="false" supports_adding_forbidden="true">
 
   <property>
     <name>mapred.capacity-scheduler.maximum-system-jobs</name>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="true">
+<configuration supports_final="false">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>

+ 11 - 8
ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py

@@ -59,6 +59,16 @@ def yarn(name = None):
     )
     params.HdfsDirectory(None, action="create")
 
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True
+    )
+    Directory(params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True
+    )
+
   Directory([params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
@@ -70,14 +80,7 @@ def yarn(name = None):
             group=params.user_group,
             recursive=True
   )
-  Directory(params.nm_local_dirs.split(','),
-            owner=params.yarn_user,
-            recursive=True
-  )
-  Directory(params.nm_log_dirs.split(','),
-            owner=params.yarn_user,
-            recursive=True
-  )
+
   Directory(params.yarn_log_dir_prefix,
             owner=params.yarn_user,
             recursive=True

+ 31 - 31
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py

@@ -33,42 +33,42 @@ def hbase_decommission(env):
   )
   
   if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
 
-    if params.hbase_drain_only == 'true':
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-          pass
-      pass
-
-    else:
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
 
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
-          regionmover_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
 
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
 
-          Execute(regionmover_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-        pass
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
       pass
     pass
-
+  pass
+  
 
   pass

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py

@@ -46,7 +46,7 @@ else:
 hadoop_conf_dir = "/etc/hadoop/conf"
 hbase_conf_dir = "/etc/hbase/conf"
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = config['commandParams']['mark_draining_only']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user

+ 6 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml

@@ -73,7 +73,12 @@
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
   <!-- hadoop-env.sh -->
   <property>
     <name>content</name>

+ 18 - 9
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py

@@ -18,25 +18,34 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
 from utils import service
 
+
+def create_dirs(data_dir, params):
+  """
+  :param data_dir: The directory to create
+  :param params: parameters
+  """
+  Directory(data_dir,
+            recursive=True,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+
 def datanode(action=None):
   import params
-
   if action == "configure":
     Directory(params.dfs_domain_socket_dir,
               recursive=True,
               mode=0751,
               owner=params.hdfs_user,
               group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(data_dir,
-                recursive=True,
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group,
-                ignore_failures=True
-      )
+
+    handle_dfs_data_dir(create_dirs, params)
 
   elif action == "start" or action == "stop":
     service(

+ 11 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -144,6 +144,13 @@ namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
 fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -203,8 +210,11 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 java_home = config['hostLevelParams']['java_home']
+stack_version = str(config['hostLevelParams']['stack_version'])
+
+stack_is_champlain_or_further = not (stack_version.startswith('2.0') or stack_version.startswith('2.1'))
 
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
+if stack_version.startswith('2.0') and System.get_instance().os_family != "suse":
   # deprecated rhel jsvc_path
   jsvc_path = "/usr/libexec/bigtop-utils"
 else:

+ 87 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py

@@ -16,8 +16,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os
 
 from resource_management import *
+import re
 
 
 def service(action=None, name=None, user=None, create_pid_dir=False,
@@ -30,10 +32,6 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
   check_process = format(
     "ls {pid_file} >/dev/null 2>&1 &&"
     " ps `cat {pid_file}` >/dev/null 2>&1")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
   if create_pid_dir:
     Directory(pid_dir,
@@ -44,10 +42,74 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
               owner=user,
               recursive=True)
 
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+
   if params.security_enabled and name == "datanode":
+    dfs_dn_port = get_port(params.dfs_dn_addr)
+    dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
+
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if params.dfs_http_policy == "HTTPS_ONLY":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif params.dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+
+    # Calculate HADOOP_SECURE_DN_* env vars, but not append them yet
+    # These variables should not be set when starting secure datanode as a non-root
+    ## On secure datanodes, user to run the datanode as after dropping privileges
+    hadoop_secure_dn_user = params.hdfs_user
+    ## Where log files are stored in the secure data environment.
+    hadoop_secure_dn_log_dir = format("{hdfs_log_dir_prefix}/{hadoop_secure_dn_user}")
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hadoop_secure_dn_user}")
+    hadoop_secure_dn_exports = {
+      'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
+      'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
+      'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
+    }
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
+    if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+      if params.stack_is_champlain_or_further:
+        hadoop_env_exports.update(hadoop_secure_dn_exports)
+
+    if action == 'stop' and params.stack_is_champlain_or_further and \
+      os.path.isfile(hadoop_secure_dn_pid_file):
+        # Special handling: if a secure DN was previously started as root
+        # (leaving behind the secure pid file) and is then reconfigured to
+        # run as non-root, a restart must still stop the old root-owned
+        # instance; otherwise we could not stop the running process.
+        user = "root"
+        try:
+          with open(hadoop_secure_dn_pid_file, 'r') as f:
+            pid = f.read()
+          os.kill(int(pid), 0)
+          hadoop_env_exports.update(hadoop_secure_dn_exports)
+        except IOError:
+          pass  # Can not open pid file
+        except ValueError:
+          pass  # Pid file content is invalid
+        except OSError:
+          pass  # Process is not running
+
+
+  hadoop_env_exports_str = ''
+  for exp in hadoop_env_exports.items():
+    hadoop_env_exports_str += "export {0}={1} && ".format(exp[0], exp[1])
+
+  hadoop_daemon = format(
+    "{hadoop_env_exports_str}"
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
   daemon_cmd = format("{ulimit_cmd} su - {user} -c '{cmd} {action} {name}'")
 
@@ -64,3 +126,24 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     File(pid_file,
          action="delete",
     )
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is root-owned (privileged, below 1024) on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False

+ 15 - 15
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py

@@ -44,7 +44,21 @@ def hive(name=None):
   # The reason is that stale-configs are service-level, not component.
   for conf_dir in params.hive_conf_dirs_list:
     fill_conf_dir(conf_dir)
-    
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
   if name == 'metastore' or name == 'hiveserver2':
     jdbc_connector()
     
@@ -93,20 +107,6 @@ def hive(name=None):
     crt_directory(params.hive_log_dir)
     crt_directory(params.hive_var_lib)
 
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-    )
-
 def fill_conf_dir(component_conf_dir):
   import params
   

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="true" supports_adding_forbidden="true">
+<configuration supports_final="false" supports_adding_forbidden="true">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>

+ 8 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py

@@ -60,6 +60,13 @@ def yarn(name = None):
     )
     params.HdfsDirectory(None, action="create")
 
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(',') + params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True,
+              ignore_failures=True,
+              )
+
   Directory([params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
@@ -71,7 +78,7 @@ def yarn(name = None):
             group=params.user_group,
             recursive=True
   )
-  Directory(params.nm_local_dirs.split(',')+params.nm_log_dirs.split(',')+[params.yarn_log_dir_prefix],
+  Directory([params.yarn_log_dir_prefix],
             owner=params.yarn_user,
             recursive=True,
             ignore_failures=True,

+ 0 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2

@@ -20,5 +20,4 @@ Client {
 com.sun.security.auth.module.Krb5LoginModule required
 useKeyTab=false
 useTicketCache=true;
-principal="{{zk_principal}}";
 };

+ 24 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py

@@ -201,7 +201,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
         if siteName in recommendedDefaults:
           siteProperties = getSiteProperties(configurations, siteName)
           if siteProperties is not None:
-            resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"])
+            resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations)
             items.extend(resultItems)
     return items
 
@@ -259,7 +259,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
     return None
 
-  def validateMapReduce2Configurations(self, properties, recommendedDefaults):
+  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations):
     validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
                         {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
                         {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
@@ -269,7 +269,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
                         {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')} ]
     return self.toConfigurationValidationProblems(validationItems, "mapred-site")
 
-  def validateYARNConfigurations(self, properties, recommendedDefaults):
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations):
     validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
                         {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
                         {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
@@ -349,3 +349,24 @@ def formatXmxSizeToBytes(value):
     modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
     }[1]
   return to_number(value) * m
+
+def getPort(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def isSecurePort(port):
+  """
+  Returns True if the port is root-owned (privileged, below 1024) on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="true">
+<configuration supports_final="false">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>

+ 11 - 8
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/yarn.py

@@ -59,6 +59,16 @@ def yarn(name = None):
     )
     params.HdfsDirectory(None, action="create")
 
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True
+    )
+    Directory(params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              recursive=True
+    )
+
   Directory([params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
@@ -70,14 +80,7 @@ def yarn(name = None):
             group=params.user_group,
             recursive=True
   )
-  Directory(params.nm_local_dirs.split(','),
-            owner=params.yarn_user,
-            recursive=True
-  )
-  Directory(params.nm_log_dirs.split(','),
-            owner=params.yarn_user,
-            recursive=True
-  )
+
   Directory(params.yarn_log_dir_prefix,
             owner=params.yarn_user,
             recursive=True

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/falcon-env.xml

@@ -91,7 +91,7 @@ export FALCON_SERVER_OPTS="-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Df
 # java heap size we want to set for the falcon server. Default is 1024MB
 #export FALCON_SERVER_HEAP=
 
-# What is is considered as falcon home dir. Default is the base locaion of the installed software
+# What is considered as falcon home dir. Default is the base location of the installed software
 #export FALCON_HOME_DIR=
 
 # Where log files are stored. Defatult is logs directory under the base install location

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/falcon.py

@@ -79,11 +79,13 @@ def falcon(type, action = None):
 
     if action == 'start':
       Execute(format('{falcon_home}/bin/falcon-start -port {falcon_port}'),
-              user=params.falcon_user
+              user=params.falcon_user,
+              path=params.hadoop_bin_dir
       )
     if action == 'stop':
       Execute(format('{falcon_home}/bin/falcon-stop'),
-              user=params.falcon_user
+              user=params.falcon_user,
+              path=params.hadoop_bin_dir
       )
       File(params.server_pid_file,
            action='delete'

+ 0 - 89
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/templates/startup.properties.j2

@@ -1,89 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-####################################################
-####    This is used for falcon packaging only. ####
-####################################################
-
-*.domain=${falcon.app.type}
-
-######### Implementation classes #########
-## DONT MODIFY UNLESS SURE ABOUT CHANGE ##
-*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine
-*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder
-*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder
-*.journal.impl=org.apache.falcon.transaction.SharedFileSystemJournal
-*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager
-*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService
-*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager
-*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
-
-*.application.services=org.apache.falcon.entity.store.ConfigurationStore,\
-                        org.apache.falcon.service.ProcessSubscriberService,\
-                        org.apache.falcon.rerun.service.RetryService,\
-						org.apache.falcon.rerun.service.LateRunService,\
-						org.apache.falcon.service.LogCleanupService
-prism.application.services=org.apache.falcon.entity.store.ConfigurationStore
-*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
-                        org.apache.falcon.entity.ColoClusterRelation,\
-                        org.apache.falcon.group.FeedGroupMap,\
-                        org.apache.falcon.service.SharedLibraryHostingService
-prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
-                        org.apache.falcon.entity.ColoClusterRelation,\
-                        org.apache.falcon.group.FeedGroupMap
-*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory
-*.shared.libs=activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms,s4fs-0.1.jar
-
-######### Implementation classes #########
-
-*.config.store.uri={{store_uri}}
-*.system.lib.location=${falcon.home}/server/webapp/falcon/WEB-INF/lib
-prism.system.lib.location=${falcon.home}/server/webapp/prism/WEB-INF/lib
-*.broker.url=tcp://localhost:61616
-*.retry.recorder.path=${falcon.log.dir}/retry
-
-*.falcon.cleanup.service.frequency=days(1)
-
-#default time-to-live for a JMS message 3 days (time in minutes)
-*.broker.ttlInMins=4320
-*.entity.topic=FALCON.ENTITY.TOPIC
-*.max.retry.failure.count=1
-
-######### Properties for configuring iMon client and metric #########
-*.internal.queue.size=1000
-*.current.colo=default
-*.falcon.authentication.type=simple
-*.falcon.http.authentication.type=simple

+ 34 - 21
ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/metrics.json

@@ -2,45 +2,52 @@
   "STORM_REST_API": {
     "Component": [
       {
-        "type": "jmx",
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties" : {
+          "default_port": "8745",
+          "port_config_type": "storm-site",
+          "port_property_name": "storm.port",
+          "protocol": "http"
+        },
         "metrics": {
-          "metrics/api/cluster/summary/tasks.total": {
-            "metric": "tasks.total",
+          "metrics/api/cluster/summary/tasks.total":
+          {
+            "metric": "/api/cluster/summary##tasks.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.total": {
-            "metric": "slots.total",
+            "metric": "/api/cluster/summary##slots.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.free": {
-            "metric": "slots.free",
+            "metric": "/api/cluster/summary##slots.free",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/supervisors": {
-            "metric": "supervisors",
+            "metric": "/api/cluster/summary##supervisors",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/executors.total": {
-            "metric": "executors.total",
+            "metric": "/api/cluster/summary##executors.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.used": {
-            "metric": "slots.used",
+            "metric": "/api/cluster/summary##slots.used",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/topologies": {
-            "metric": "topologies",
+            "metric": "/api/cluster/summary##topologies",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/nimbus.uptime": {
-            "metric": "nimbus.uptime",
+            "metric": "/api/cluster/summary##nimbus.uptime",
             "pointInTime": true,
             "temporal": false
           }
@@ -49,51 +56,57 @@
     ],
     "HostComponent": [
       {
-        "type": "jmx",
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties" : {
+          "default_port": "8745",
+          "port_config_type": "storm-site",
+          "port_property_name": "storm.port",
+          "protocol": "http"
+        },
         "metrics": {
-          "metrics/api/cluster/summary/tasks.total": {
-            "metric": "tasks.total",
+          "metrics/api/cluster/summary/tasks.total":
+          {
+            "metric": "/api/cluster/summary##tasks.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.total": {
-            "metric": "slots.total",
+            "metric": "/api/cluster/summary##slots.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.free": {
-            "metric": "slots.free",
+            "metric": "/api/cluster/summary##slots.free",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/supervisors": {
-            "metric": "supervisors",
+            "metric": "/api/cluster/summary##supervisors",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/executors.total": {
-            "metric": "executors.total",
+            "metric": "/api/cluster/summary##executors.total",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/slots.used": {
-            "metric": "slots.used",
+            "metric": "/api/cluster/summary##slots.used",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/topologies": {
-            "metric": "topologies",
+            "metric": "/api/cluster/summary##topologies",
             "pointInTime": true,
             "temporal": false
           },
           "metrics/api/cluster/summary/nimbus.uptime": {
-            "metric": "nimbus.uptime",
+            "metric": "/api/cluster/summary##nimbus.uptime",
             "pointInTime": true,
             "temporal": false
           }
         }
       }
-
     ]
   },
   "NIMBUS": {

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="true">
+<configuration supports_final="false">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py

@@ -77,13 +77,13 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
     parentValidators.update(childValidators)
     return parentValidators
 
-  def validateHiveConfigurations(self, properties, recommendedDefaults):
+  def validateHiveConfigurations(self, properties, recommendedDefaults, configurations):
     validationItems = [ {"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
                         {"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
                         {"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
     return self.toConfigurationValidationProblems(validationItems, "hive-site")
 
-  def validateTezConfigurations(self, properties, recommendedDefaults):
+  def validateTezConfigurations(self, properties, recommendedDefaults, configurations):
     validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
                         {"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')} ]
     return self.toConfigurationValidationProblems(validationItems, "tez-site")

+ 84 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/falcon-startup.properties.xml

@@ -0,0 +1,84 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+  <property>
+    <name>*.journal.impl</name>
+    <value>org.apache.falcon.transaction.SharedFileSystemJournal</value>
+    <description>Journal implementation class</description>
+  </property>
+  <property>
+    <name>*.application.services</name>
+    <value>org.apache.falcon.security.AuthenticationInitializationService,\
+      org.apache.falcon.workflow.WorkflowJobEndNotificationService, \
+      org.apache.falcon.service.ProcessSubscriberService,\
+      org.apache.falcon.entity.store.ConfigurationStore,\
+      org.apache.falcon.rerun.service.RetryService,\
+      org.apache.falcon.rerun.service.LateRunService,\
+      org.apache.falcon.service.LogCleanupService
+    </value>
+    <description>Falcon Services</description>
+  </property>
+  <property>
+    <name>prism.application.services</name>
+    <value>org.apache.falcon.entity.store.ConfigurationStore</value>
+    <description>Prism Services</description>
+  </property>
+  <property>
+    <name>prism.configstore.listeners</name>
+    <value>org.apache.falcon.entity.v0.EntityGraph,\
+      org.apache.falcon.entity.ColoClusterRelation,\
+      org.apache.falcon.group.FeedGroupMap
+    </value>
+    <description>Prism Configuration Store Change listeners</description>
+  </property>
+  <!--<property>-->
+    <!--<name>*.workflow.execution.listeners</name>-->
+    <!--<value> </value>-->
+    <!--<description>Workflow Job Execution Completion listeners</description>-->
+  <!--</property>-->
+  <property>
+    <name>*.falcon.security.authorization.enabled</name>
+    <value>false</value>
+    <description>Authorization Enabled flag</description>
+  </property>
+  <property>
+    <name>*.falcon.security.authorization.superusergroup</name>
+    <value>falcon</value>
+    <description>The name of the group of super-users</description>
+  </property>
+  <property>
+    <name>*.falcon.security.authorization.admin.users</name>
+    <value>falcon,ambari-qa</value>
+    <description>Admin Users, comma separated users</description>
+  </property>
+  <property>
+    <name>*.falcon.security.authorization.admin.groups</name>
+    <value>falcon</value>
+    <description>Admin Group Membership, comma separated users</description>
+  </property>
+  <property>
+    <name>*.falcon.security.authorization.provider</name>
+    <value>org.apache.falcon.security.DefaultAuthorizationProvider</value>
+    <description>Authorization Provider Implementation Fully Qualified Class Name</description>
+  </property>
+
+</configuration>

+ 192 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -0,0 +1,192 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <description>Hadoop Root Logger</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+
+#Hadoop logging options
+export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
+    </value>
+  </property>
+  
+</configuration>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+</configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/SLIDER/metainfo.xml

@@ -21,7 +21,7 @@
     <service>
       <name>SLIDER</name>
       <displayName>Slider</displayName>
-      <comment>Apache Slider is a YARN application to deploy existing distributed applications on YARN, monitor them and make them larger or smaller as desired -even while the application is running.</comment>
+      <comment>A framework for deploying, managing and monitoring existing distributed applications on YARN.</comment>
       <version>0.60.0.2.2.0.0</version>
       <components>
         <component>

+ 1079 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metrics.json

@@ -0,0 +1,1079 @@
+{
+  "STORM_UI_SERVER": {
+    "Component": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties" : {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "storm.port",
+          "protocol": "http"
+        },
+        "metrics": {
+          "metrics/api/v1/cluster/summary/tasksTotal":
+          {
+            "metric": "/api/v1/cluster/summary##tasksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/topology/summary":
+          {
+            "metric": "/api/v1/topology/summary?field=topologies##topologies",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsTotal": {
+            "metric": "/api/v1/cluster/summary##slotsTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsFree": {
+            "metric": "/api/v1/cluster/summary##slotsFree",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/supervisors": {
+            "metric": "/api/v1/cluster/summary##supervisors",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/executorsTotal": {
+            "metric": "/api/v1/cluster/summary##executorsTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsUsed": {
+            "metric": "/api/v1/cluster/summary##slotsUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/nimbusUptime": {
+            "metric": "/api/v1/cluster/summary##nimbusUptime",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties" : {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "storm.port",
+          "protocol": "http"
+        },
+        "metrics": {
+          "metrics/api/v1/cluster/summary/tasksTotal":
+          {
+            "metric": "/api/v1/cluster/summary##tasksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/topology/summary":
+          {
+            "metric": "/api/v1/topology/summary?field=topologies##topologies",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsTotal": {
+            "metric": "/api/v1/cluster/summary##slotsTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsFree": {
+            "metric": "/api/v1/cluster/summary##slotsFree",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/supervisors": {
+            "metric": "/api/v1/cluster/summary##supervisors",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/executorsTotal": {
+            "metric": "/api/v1/cluster/summary##executorsTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/slotsUsed": {
+            "metric": "/api/v1/cluster/summary##slotsUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/api/v1/cluster/summary/nimbusUptime": {
+            "metric": "/api/v1/cluster/summary##nimbusUptime",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ]
+  },
+  "NIMBUS": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/committed": {
+            "metric": "Nimbus.JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/init": {
+            "metric": "Nimbus.JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/max": {
+            "metric": "Nimbus.JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/used": {
+            "metric": "Nimbus.JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/os/processcputime": {
+            "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+            "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/threading/threadcount": {
+            "metric": "Nimbus.JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+
+          "metrics/storm/nimbus/freeslots": {
+            "metric": "Free Slots",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/supervisors": {
+            "metric": "Supervisors",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/topologies": {
+            "metric": "Topologies",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/totalexecutors": {
+            "metric": "Total Executors",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/totalslots": {
+            "metric": "Total Slots",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/totaltasks": {
+            "metric": "Total Tasks",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/usedslots": {
+            "metric": "Used Slots",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/committed": {
+            "metric": "Nimbus.JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/init": {
+            "metric": "Nimbus.JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/max": {
+            "metric": "Nimbus.JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/heap/used": {
+            "metric": "Nimbus.JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+            "metric": "Nimbus.JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/os/processcputime": {
+            "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+            "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/nimbus/jvm/threading/threadcount": {
+            "metric": "Nimbus.JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          }
+
+        }
+      }
+    ]
+  },
+  "SUPERVISOR": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/committed": {
+            "metric": "Supervisor.JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/init": {
+            "metric": "Supervisor.JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/max": {
+            "metric": "Supervisor.JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/used": {
+            "metric": "Supervisor.JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/os/processcputime": {
+            "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+            "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/threading/threadcount": {
+            "metric": "Supervisor.JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/init": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/max": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/used": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/os/processcputime": {
+            "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+            "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/threading/threadcount": {
+            "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          }
+
+
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/committed": {
+            "metric": "Supervisor.JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/init": {
+            "metric": "Supervisor.JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/max": {
+            "metric": "Supervisor.JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/heap/used": {
+            "metric": "Supervisor.JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+            "metric": "Supervisor.JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/os/processcputime": {
+            "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+            "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/supervisor/jvm/threading/threadcount": {
+            "metric": "Supervisor.JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/init": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/max": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/heap/used": {
+            "metric": "Worker.(.+).JVM.Memory.Heap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+            "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/os/processcputime": {
+            "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+            "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/storm/worker/$1/jvm/threading/threadcount": {
+            "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      }
+    ]
+  }
+}

+ 151 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py

@@ -0,0 +1,151 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+class HDP22StackAdvisor(HDP21StackAdvisor):
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(HDP22StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "HDFS": self.recommendHDFSConfigurations,
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+
+  def recommendHDFSConfigurations(self, configurations, clusterData):
+    self.putProperty(configurations, "hdfs-site")
+
+  def getServiceConfigurationValidators(self):
+    parentValidators = super(HDP22StackAdvisor, self).getServiceConfigurationValidators()
+    childValidators = {
+      "HDFS": ["hdfs-site", self.validateHDFSConfigurations],
+    }
+    parentValidators.update(childValidators)
+    return parentValidators
+
+
+  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations):
+    # We can not access property hadoop.security.authentication from the
+    # other config (core-site). That's why we are using another heuristics here
+    hdfs_site = properties
+    core_site = getSiteProperties(configurations, "core-site")
+
+    dfs_encrypt_data_transfer = 'dfs.encrypt.data.transfer'  # Hadoop Wire encryption
+    try:
+      wire_encryption_enabled = hdfs_site[dfs_encrypt_data_transfer] == "true"
+    except KeyError:
+      wire_encryption_enabled = False
+
+    HTTP_ONLY = 'HTTP_ONLY'
+    HTTPS_ONLY = 'HTTPS_ONLY'
+    HTTP_AND_HTTPS = 'HTTP_AND_HTTPS'
+
+    VALID_HTTP_POLICY_VALUES = [HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS]
+    VALID_TRANSFER_PROTECTION_VALUES = ['authentication', 'integrity', 'privacy']
+
+    validationItems = []
+    if (not wire_encryption_enabled and   # If wire encryption is enabled at Hadoop, it disables all our checks
+          core_site['hadoop.security.authentication'] == 'kerberos' and
+          core_site['hadoop.security.authorization'] == 'true'):
+      # security is enabled
+
+      dfs_http_policy = 'dfs.http.policy'
+      dfs_datanode_address = 'dfs.datanode.address'
+      datanode_http_address = 'dfs.datanode.http.address'
+      datanode_https_address = 'dfs.datanode.https.address'
+      data_transfer_protection = 'dfs.data.transfer.protection'
+
+      try: # Params may be absent
+        privileged_dfs_dn_port = isSecurePort(getPort(hdfs_site[dfs_datanode_address]))
+      except KeyError:
+        privileged_dfs_dn_port = False
+      try:
+        privileged_dfs_http_port = isSecurePort(getPort(hdfs_site[datanode_http_address]))
+      except KeyError:
+        privileged_dfs_http_port = False
+      try:
+        privileged_dfs_https_port = isSecurePort(getPort(hdfs_site[datanode_https_address]))
+      except KeyError:
+        privileged_dfs_https_port = False
+      try:
+        dfs_http_policy_value = hdfs_site[dfs_http_policy]
+      except KeyError:
+        dfs_http_policy_value = HTTP_ONLY  # Default
+      try:
+        data_transfer_protection_value = hdfs_site[data_transfer_protection]
+      except KeyError:
+        data_transfer_protection_value = None
+
+      if dfs_http_policy_value not in VALID_HTTP_POLICY_VALUES:
+        validationItems.append({"config-name": dfs_http_policy,
+                                "item": self.getWarnItem(
+                                  "Invalid property value: {0}. Valid values are {1}".format(
+                                    dfs_http_policy_value, VALID_HTTP_POLICY_VALUES))})
+
+      # determine whether we use secure ports
+      address_properties_with_warnings = []
+      if dfs_http_policy_value == HTTPS_ONLY:
+        any_privileged_ports_are_in_use = privileged_dfs_dn_port or privileged_dfs_https_port
+        if any_privileged_ports_are_in_use:
+          important_properties = [dfs_datanode_address, datanode_https_address]
+          message = "You set up datanode to use some non-secure ports, but {0} is set to {1}. " \
+                    "If you want to run Datanode under non-root user in a secure cluster, " \
+                    "you should set all these properties {2} " \
+                    "to use non-secure ports (if property {3} does not exist, " \
+                    "just add it). You may also set up property {4} ('{5}' is a good default value). " \
+                    "Also, set up WebHDFS with SSL as " \
+                    "described in manual in order to be able to " \
+                    "use HTTPS.".format(dfs_http_policy, dfs_http_policy_value, important_properties,
+                                        datanode_https_address, data_transfer_protection,
+                                        VALID_TRANSFER_PROTECTION_VALUES[0])
+          address_properties_with_warnings.extend(important_properties)
+      else:  # dfs_http_policy_value == HTTP_AND_HTTPS or HTTP_ONLY
+        # We don't enforce datanode_https_address to use privileged ports here
+        any_nonprivileged_ports_are_in_use = not privileged_dfs_dn_port or not privileged_dfs_http_port
+        if any_nonprivileged_ports_are_in_use:
+          important_properties = [dfs_datanode_address, datanode_http_address]
+          message = "You have set up datanode to use some non-secure ports, but {0} is set to {1}. " \
+                    "In a secure cluster, Datanode forbids using non-secure ports " \
+                    "if {0} is not set to {3}. " \
+                    "Please make sure that properties {2} use secure ports.".format(
+                      dfs_http_policy, dfs_http_policy_value, important_properties, HTTPS_ONLY)
+          address_properties_with_warnings.extend(important_properties)
+
+      # Generate port-related warnings if any
+      for prop in address_properties_with_warnings:
+        validationItems.append({"config-name": prop,
+                                "item": self.getWarnItem(message)})
+
+      # Check if it is appropriate to use dfs.data.transfer.protection
+      if data_transfer_protection_value is not None:
+        if dfs_http_policy_value in [HTTP_ONLY, HTTP_AND_HTTPS]:
+          validationItems.append({"config-name": data_transfer_protection,
+                                  "item": self.getWarnItem(
+                                    "{0} property can not be used when {1} is set to any "
+                                    "value other then {2}. Tip: When {1} property is not defined, it defaults to {3}".format(
+                                    data_transfer_protection, dfs_http_policy, HTTPS_ONLY, HTTP_ONLY))})
+        elif not data_transfer_protection_value in VALID_TRANSFER_PROTECTION_VALUES:
+          validationItems.append({"config-name": data_transfer_protection,
+                                  "item": self.getWarnItem(
+                                    "Invalid property value: {0}. Valid values are {1}.".format(
+                                      data_transfer_protection_value, VALID_TRANSFER_PROTECTION_VALUES))})
+    return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
+
+
+

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java

@@ -617,7 +617,7 @@ public class StackExtensionHelperTest {
     assertEquals(2, attributes.size());
     QName supportsFinal = new QName("", "supports_final");
     assertTrue(attributes.containsKey(supportsFinal));
-    assertEquals("true", attributes.get(supportsFinal));
+    assertEquals("false", attributes.get(supportsFinal));
     QName supportsDeletable = new QName("", "supports_deletable");
     assertTrue(attributes.containsKey(supportsDeletable));
     assertEquals("false", attributes.get(supportsDeletable));

+ 6 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java

@@ -1640,8 +1640,10 @@ public class BlueprintConfigurationProcessorTest {
     // setup properties that include host information
     hiveSiteProperties.put("hive.metastore.uris", expectedHostName + ":" + expectedPortNum);
     hiveSiteProperties.put("javax.jdo.option.ConnectionURL", expectedHostName + ":" + expectedPortNum);
+    hiveSiteProperties.put("hive.zookeeper.quorum", expectedHostName + ":" + expectedPortNum + "," + expectedHostNameTwo + ":" + expectedPortNum);
     hiveEnvProperties.put("hive_hostname", expectedHostName);
 
+
     webHCatSiteProperties.put("templeton.hive.properties", expectedHostName + "," + expectedHostNameTwo);
     webHCatSiteProperties.put("templeton.kerberos.principal", expectedHostName);
 
@@ -1677,6 +1679,10 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("hive property not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
 
+    assertEquals("hive zookeeper quorum property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
+
     mockSupport.verifyAll();
   }
 

+ 190 - 167
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java

@@ -33,7 +33,7 @@ import org.apache.ambari.server.controller.ganglia.GangliaPropertyProvider;
 import org.apache.ambari.server.controller.ganglia.GangliaPropertyProviderTest.TestGangliaHostProvider;
 import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
 import org.apache.ambari.server.controller.jmx.TestStreamProvider;
-import org.apache.ambari.server.controller.jmx.JMXPropertyProviderTest.TestJMXHostProvider;
+import org.apache.ambari.server.controller.metrics.JMXPropertyProviderTest;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.PropertyProvider;
 import org.apache.ambari.server.controller.spi.Request;
@@ -65,9 +65,9 @@ import com.google.inject.persist.PersistService;
 public class StackDefinedPropertyProviderTest {
   private static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = "HostRoles/host_name";
   private static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = "HostRoles/component_name";
-  private static final String HOST_COMPONENT_STATE_PROPERTY_ID = "HostRoles/state";    
+  private static final String HOST_COMPONENT_STATE_PROPERTY_ID = "HostRoles/state";
+
 
-  
   private Clusters clusters = null;
   private Injector injector = null;
 
@@ -78,13 +78,13 @@ public class StackDefinedPropertyProviderTest {
     injector = Guice.createInjector(module);
     injector.getInstance(GuiceJpaInitializer.class);
     StackDefinedPropertyProvider.init(injector);
-    
+
     clusters = injector.getInstance(Clusters.class);
     clusters.addCluster("c1");
-    
+
     Cluster cluster = clusters.getCluster("c1");
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.5"));
-    
+
     clusters.addHost("h1");
     Host host = clusters.getHost("h1");
     Map<String, String> hostAttributes = new HashMap<String, String>();
@@ -92,88 +92,87 @@ public class StackDefinedPropertyProviderTest {
     hostAttributes.put("os_release_version", "6.3");
     host.setHostAttributes(hostAttributes);
     host.persist();
-    
+
     clusters.mapHostToCluster("h1", "c1");
   }
-  
+
   @After
   public void teardown() throws Exception {
-    injector.getInstance(PersistService.class).stop();    
+    injector.getInstance(PersistService.class).stop();
   }
-  
+
   @Test
   public void testPopulateHostComponentResources() throws Exception {
-    TestJMXHostProvider tj = new TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestJMXHostProvider tj = new JMXPropertyProviderTest.TestJMXHostProvider(true);
     TestGangliaHostProvider tg = new TestGangliaHostProvider();
-    
+    JMXPropertyProviderTest.TestMetricsHostProvider tm = new JMXPropertyProviderTest.TestMetricsHostProvider();
+
     StackDefinedPropertyProvider sdpp = new StackDefinedPropertyProvider(
-        Resource.Type.HostComponent, tj, tg, new CombinedStreamProvider(),
+        Resource.Type.HostComponent, tj, tg, tm, new CombinedStreamProvider(),
         "HostRoles/cluster_name", "HostRoles/host_name", "HostRoles/component_name", "HostRoles/state", null, null);
-    
+
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty("HostRoles/cluster_name", "c1");
     resource.setProperty("HostRoles/host_name", "h1");
     resource.setProperty("HostRoles/component_name", "NAMENODE");
     resource.setProperty("HostRoles/state", "STARTED");
-    
+
     // request with an empty set should get all supported properties
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
 
     Set<Resource> set = sdpp.populateResources(Collections.singleton(resource), request, null);
     Assert.assertEquals(1, set.size());
-    
+
     Resource res = set.iterator().next();
-    
+
     Map<String, Map<String, Object>> values = res.getPropertiesMap();
-    
+
     Assert.assertTrue("Expected JMX metric 'metrics/dfs/FSNamesystem'", values.containsKey("metrics/dfs/FSNamesystem"));
     Assert.assertTrue("Expected JMX metric 'metrics/dfs/namenode'", values.containsKey("metrics/dfs/namenode"));
     Assert.assertTrue("Expected Ganglia metric 'metrics/rpcdetailed'", values.containsKey("metrics/rpcdetailed"));
   }
-  
-  
+
+
   @Test
   public void testCustomProviders() throws Exception {
-    
+
     StackDefinedPropertyProvider sdpp = new StackDefinedPropertyProvider(
-        Resource.Type.HostComponent, null, null, new CombinedStreamProvider(),
+        Resource.Type.HostComponent, null, null, null, new CombinedStreamProvider(),
         "HostRoles/cluster_name", "HostRoles/host_name", "HostRoles/component_name", "HostRoles/state",
         new EmptyPropertyProvider(), new EmptyPropertyProvider());
-    
+
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty("HostRoles/cluster_name", "c1");
     resource.setProperty("HostRoles/host_name", "h1");
     resource.setProperty("HostRoles/component_name", "DATANODE");
     resource.setProperty("HostRoles/state", "STARTED");
-    
+
     // request with an empty set should get all supported properties
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet(), new HashMap<String, TemporalInfo>());
 
     Set<Resource> set = sdpp.populateResources(Collections.singleton(resource), request, null);
     Assert.assertEquals(1, set.size());
-    
+
     Resource res = set.iterator().next();
-    
+
     Map<String, Map<String, Object>> values = res.getPropertiesMap();
     Assert.assertTrue(values.containsKey("foo/type1"));
     Assert.assertTrue(values.containsKey("foo/type2"));
     Assert.assertTrue(values.containsKey("foo/type3"));
     Assert.assertFalse(values.containsKey("foo/type4"));
-    
+
     Assert.assertTrue(values.get("foo/type1").containsKey("name"));
     Assert.assertTrue(values.get("foo/type2").containsKey("name"));
     Assert.assertTrue(values.get("foo/type3").containsKey("name"));
 
     Assert.assertEquals("value1", values.get("foo/type1").get("name"));
-    Assert.assertEquals("value2", values.get("foo/type2").get("name"));
-    Assert.assertEquals("value3", values.get("foo/type3").get("name"));
-    
+
   }
-  
 
-  
+
+
   private static class CombinedStreamProvider implements StreamProvider {
 
     @Override
@@ -193,12 +192,12 @@ public class StackDefinedPropertyProviderTest {
       return readFrom(spec);
     }
   }
-  
+
   private static class EmptyPropertyProvider implements PropertyProvider {
 
     @Override
     public Set<Resource> populateResources(Set<Resource> resources,
-        Request request, Predicate predicate) throws SystemException {
+                                           Request request, Predicate predicate) throws SystemException {
       // TODO Auto-generated method stub
       return null;
     }
@@ -210,19 +209,19 @@ public class StackDefinedPropertyProviderTest {
     }
 
   }
-  
+
   /**
    * Test for empty constructor.  Public since instantiated via reflection.
    */
   public static class CustomMetricProvider1 implements PropertyProvider {
     @Override
     public Set<Resource> populateResources(Set<Resource> resources,
-        Request request, Predicate predicate) throws SystemException {
-      
+                                           Request request, Predicate predicate) throws SystemException {
+
       for (Resource r : resources) {
         r.setProperty("foo/type1/name", "value1");
       }
-      
+
       return resources;
     }
 
@@ -237,14 +236,14 @@ public class StackDefinedPropertyProviderTest {
    */
   public static class CustomMetricProvider2 implements PropertyProvider {
     private Map<String, String> providerProperties = null;
-    
+
     public CustomMetricProvider2(Map<String, String> properties, Map<String, Metric> metrics) {
       providerProperties = properties;
     }
 
     @Override
     public Set<Resource> populateResources(Set<Resource> resources,
-        Request request, Predicate predicate) throws SystemException {
+                                           Request request, Predicate predicate) throws SystemException {
       for (Resource r : resources) {
         r.setProperty("foo/type2/name", providerProperties.get("Type2.Metric.Name"));
       }
@@ -263,7 +262,7 @@ public class StackDefinedPropertyProviderTest {
   public static class CustomMetricProvider3 implements PropertyProvider {
     private static CustomMetricProvider3 instance = null;
     private Map<String, String> providerProperties = new HashMap<String, String>();
-    
+
     public static CustomMetricProvider3 getInstance(Map<String, String> properties, Map<String, Metric> metrics) {
       if (null == instance) {
         instance = new CustomMetricProvider3();
@@ -271,10 +270,10 @@ public class StackDefinedPropertyProviderTest {
       }
       return instance;
     }
-    
+
     @Override
     public Set<Resource> populateResources(Set<Resource> resources,
-        Request request, Predicate predicate) throws SystemException {
+                                           Request request, Predicate predicate) throws SystemException {
       for (Resource r : resources) {
         r.setProperty("foo/type3/name", providerProperties.get("Type3.Metric.Name"));
       }
@@ -286,18 +285,20 @@ public class StackDefinedPropertyProviderTest {
       return Collections.emptySet();
     }
   }
-  
+
   @Test
   public void testPopulateResources_HDP2() throws Exception {
-    
+
     StreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(true);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -325,7 +326,7 @@ public class StackDefinedPropertyProviderTest {
     Assert.assertEquals(8192,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableMB")));
     Assert.assertEquals(1,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableVCores")));
     Assert.assertEquals(2,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AppsSubmitted")));
-    
+
     Assert.assertEquals(1,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/ClusterMetrics", "NumActiveNMs")));
     Assert.assertEquals(0,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/ClusterMetrics", "NumDecommissionedNMs")));
     Assert.assertEquals(0,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/ClusterMetrics", "NumLostNMs")));
@@ -346,18 +347,20 @@ public class StackDefinedPropertyProviderTest {
     request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-  }  
-  
+  }
+
   @Test
   public void testPopulateResources_HDP2_params() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -384,31 +387,33 @@ public class StackDefinedPropertyProviderTest {
     Assert.assertEquals(8192, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableMB")));
     Assert.assertEquals(1,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableVCores")));
     Assert.assertEquals(2,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AppsSubmitted")));
-    
+
     Assert.assertEquals(15,   resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/default", "AggregateContainersAllocated")));
     Assert.assertEquals(12,   resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/default", "AggregateContainersReleased")));
     Assert.assertEquals(8192, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/default", "AvailableMB")));
     Assert.assertEquals(1,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/default", "AvailableVCores")));
     Assert.assertEquals(47,   resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/default", "AppsSubmitted")));
-    
+
     Assert.assertEquals(4,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AggregateContainersAllocated")));
     Assert.assertEquals(4,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AggregateContainersReleased")));
     Assert.assertEquals(6048, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AvailableMB")));
     Assert.assertEquals(1,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AvailableVCores")));
     Assert.assertEquals(1,    resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AppsSubmitted")));
-  }  
+  }
 
 
   @Test
   public void testPopulateResources_HDP2_params_singleProperty() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -434,17 +439,19 @@ public class StackDefinedPropertyProviderTest {
     Assert.assertEquals(8192, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableMB")));
     Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root", "AvailableVCores")));
   }
-  
+
   @Test
   public void testPopulateResources_HDP2_params_category() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -489,13 +496,15 @@ public class StackDefinedPropertyProviderTest {
   @Test
   public void testPopulateResources_HDP2_params_category2() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -541,18 +550,20 @@ public class StackDefinedPropertyProviderTest {
     Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AvailableMB")));
     Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AvailableVCores")));
     Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue/root/second_queue", "AppsSubmitted")));
-  }  
+  }
 
   @Test
   public void testPopulateResources_jmx_JournalNode() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -670,20 +681,22 @@ public class StackDefinedPropertyProviderTest {
     cluster.setDesiredStackVersion(new StackId("HDP-2.1.1"));
 
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
-            Resource.Type.HostComponent,
-            hostProvider,
-            gangliaHostProvider,
-            streamProvider,
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name"),
-            PropertyHelper.getPropertyId("HostRoles", "state"),
-            new EmptyPropertyProvider(),
-            new EmptyPropertyProvider());
+        Resource.Type.HostComponent,
+        hostProvider,
+        gangliaHostProvider,
+        metricsHostProvider,
+        streamProvider,
+        PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+        PropertyHelper.getPropertyId("HostRoles", "host_name"),
+        PropertyHelper.getPropertyId("HostRoles", "component_name"),
+        PropertyHelper.getPropertyId("HostRoles", "state"),
+        new EmptyPropertyProvider(),
+        new EmptyPropertyProvider());
 
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
@@ -698,26 +711,28 @@ public class StackDefinedPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
     // see test/resources/storm_rest_api_jmx.json for values
-    Assert.assertEquals(28, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/tasks.total", "tasks.total")));
-    Assert.assertEquals(8, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/slots.total", "slots.total")));
-    Assert.assertEquals(5, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/slots.free", "slots.free")));
-    Assert.assertEquals(2, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/supervisors", "supervisors")));
-    Assert.assertEquals(28, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/executors.total", "executors.total")));
-    Assert.assertEquals(3, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/slots.used", "slots.used")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/topologies", "topologies")));
-    Assert.assertEquals(4637, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary/nimbus.uptime", "nimbus.uptime")));
+    Assert.assertEquals(28.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "tasks.total")));
+    Assert.assertEquals(8.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "slots.total")));
+    Assert.assertEquals(5.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "slots.free")));
+    Assert.assertEquals(2.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "supervisors")));
+    Assert.assertEquals(28.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "executors.total")));
+    Assert.assertEquals(3.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "slots.used")));
+    Assert.assertEquals(1.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "topologies")));
+    Assert.assertEquals(4637.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/api/cluster/summary", "nimbus.uptime")));
   }
 
   @Test
   public void testPopulateResources_NoRegionServer() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         null,
@@ -734,7 +749,7 @@ public class StackDefinedPropertyProviderTest {
     resource.setProperty(HOST_COMPONENT_STATE_PROPERTY_ID, "STARTED");
 
     int preSize = resource.getPropertiesMap().size();
-    
+
     // request with an empty set should get all supported properties
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
 
@@ -746,13 +761,15 @@ public class StackDefinedPropertyProviderTest {
   @Test
   public void testPopulateResources_HBaseMaster2() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    JMXPropertyProviderTest.TestJMXHostProvider hostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(false);
     TestGangliaHostProvider gangliaHostProvider = new TestGangliaHostProvider();
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
 
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         hostProvider,
         gangliaHostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -762,39 +779,41 @@ public class StackDefinedPropertyProviderTest {
         new EmptyPropertyProvider());
 
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    
+
     resource.setProperty("HostRoles/cluster_name", "c1");
     resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-0e-34-e1.compute-1.internal");
     resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "HBASE_MASTER");
     resource.setProperty(HOST_COMPONENT_STATE_PROPERTY_ID, "STARTED");
 
-    
+
     // request with an empty set should get all supported properties
     Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
 
     Set<Resource> res = propertyProvider.populateResources(Collections.singleton(resource), request, null);
     Assert.assertEquals(1, res.size());
-    
+
     Map<String, Map<String, Object>> map = res.iterator().next().getPropertiesMap();
 
     Assert.assertTrue(map.containsKey("metrics/hbase/master"));
     // uses 'tag.isActiveMaster' (name with a dot)
     Assert.assertTrue(map.get("metrics/hbase/master").containsKey("IsActiveMaster"));
-  }    
+  }
+
 
-  
   @Test
   public void testPopulateResources_params_category5() throws Exception {
     org.apache.ambari.server.controller.ganglia.TestStreamProvider streamProvider =
         new org.apache.ambari.server.controller.ganglia.TestStreamProvider("temporal_ganglia_data_yarn_queues.txt");
 
-    TestJMXHostProvider jmxHostProvider = new TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestJMXHostProvider jmxHostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(true);
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-    
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
+
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         jmxHostProvider,
         hostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -802,8 +821,8 @@ public class StackDefinedPropertyProviderTest {
         PropertyHelper.getPropertyId("HostRoles", "state"),
         new EmptyPropertyProvider(),
         new EmptyPropertyProvider());
-    
-    
+
+
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
 
     resource.setProperty("HostRoles/cluster_name", "c1");
@@ -812,35 +831,37 @@ public class StackDefinedPropertyProviderTest {
 
     String RM_CATEGORY_1 = "metrics/yarn/Queue/root/default";
     String RM_AVAILABLE_MEMORY_PROPERTY = PropertyHelper.getPropertyId(RM_CATEGORY_1, "AvailableMB");
-    
+
     // only ask for one property
     Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
     temporalInfoMap.put(RM_CATEGORY_1, new TemporalInfoImpl(10L, 20L, 1L));
-    
+
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(RM_CATEGORY_1), temporalInfoMap);
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-    
+
     List<String> metricsRegexes = new ArrayList<String>();
-    
+
     metricsRegexes.add("metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/");
 
     Assert.assertTrue(PropertyHelper.getProperties(resource).size() > 2);
     Assert.assertNotNull(resource.getPropertyValue(RM_AVAILABLE_MEMORY_PROPERTY));
-  }  
+  }
 
   @Test
   public void testPopulateResources_ganglia_JournalNode() throws Exception {
     org.apache.ambari.server.controller.ganglia.TestStreamProvider streamProvider =
         new org.apache.ambari.server.controller.ganglia.TestStreamProvider("journalnode_ganglia_data.txt");
 
-    TestJMXHostProvider jmxHostProvider = new TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestJMXHostProvider jmxHostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(true);
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-    
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
+
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         jmxHostProvider,
         hostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -858,60 +879,60 @@ public class StackDefinedPropertyProviderTest {
 
 
     Object[][] testData = {
-      {"metrics", "boottime", 1378290058.0},
-      {"metrics/cpu", "cpu_aidle", 0.0},
-      {"metrics/cpu", "cpu_idle", 88.2},
-      {"metrics/cpu", "cpu_nice", 0.0},
-      {"metrics/cpu", "cpu_num", 2.0},
-      {"metrics/cpu", "cpu_speed", 3583.0},
-      {"metrics/cpu", "cpu_system", 8.4},
-      {"metrics/cpu", "cpu_user", 3.3},
-      {"metrics/cpu", "cpu_wio", 0.1},
-      {"metrics/disk", "disk_free", 92.428},
-      {"metrics/disk", "disk_total", 101.515},
-      {"metrics/disk", "part_max_used", 12.8},
-      {"metrics/load", "load_fifteen", 0.026},
-      {"metrics/load", "load_five", 0.114},
-      {"metrics/load", "load_one", 0.226},
-      {"metrics/memory", "mem_buffers", 129384.0},
-      {"metrics/memory", "mem_cached", 589576.0},
-      {"metrics/memory", "mem_free", 1365496.0},
-      {"metrics/memory", "mem_shared", 0.0},
-      {"metrics/memory", "mem_total", 4055144.0},
-      {"metrics/memory", "swap_free", 4128760.0},
-      {"metrics/memory", "swap_total", 4128760.0},
-      {"metrics/network", "bytes_in", 22547.48},
-      {"metrics/network", "bytes_out", 5772.33},
-      {"metrics/network", "pkts_in", 24.0},
-      {"metrics/network", "pkts_out", 35.4},
-      {"metrics/process", "proc_run", 4.0},
-      {"metrics/process", "proc_total", 657.0},
-      {"metrics/dfs/journalNode", "batchesWritten", 0.0},
-      {"metrics/dfs/journalNode", "batchesWrittenWhileLagging", 0.0},
-      {"metrics/dfs/journalNode", "bytesWritten", 0.0},
-      {"metrics/dfs/journalNode", "currentLagTxns", 0.0},
-      {"metrics/dfs/journalNode", "lastPromisedEpoch", 5.0},
-      {"metrics/dfs/journalNode", "lastWriterEpoch", 5.0},
-      {"metrics/dfs/journalNode", "lastWrittenTxId", 613.0},
-      {"metrics/dfs/journalNode", "syncs60s50thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs60s75thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs60s90thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs60s95thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs60s99thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs60s_num_ops", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s50thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s75thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s90thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s95thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s99thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs300s_num_ops", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s50thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s75thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s90thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s95thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s99thPercentileLatencyMicros", 0.0},
-      {"metrics/dfs/journalNode", "syncs3600s_num_ops", 0.0},
-      {"metrics/dfs/journalNode", "txnsWritten", 0.0}
+        {"metrics", "boottime", 1378290058.0},
+        {"metrics/cpu", "cpu_aidle", 0.0},
+        {"metrics/cpu", "cpu_idle", 88.2},
+        {"metrics/cpu", "cpu_nice", 0.0},
+        {"metrics/cpu", "cpu_num", 2.0},
+        {"metrics/cpu", "cpu_speed", 3583.0},
+        {"metrics/cpu", "cpu_system", 8.4},
+        {"metrics/cpu", "cpu_user", 3.3},
+        {"metrics/cpu", "cpu_wio", 0.1},
+        {"metrics/disk", "disk_free", 92.428},
+        {"metrics/disk", "disk_total", 101.515},
+        {"metrics/disk", "part_max_used", 12.8},
+        {"metrics/load", "load_fifteen", 0.026},
+        {"metrics/load", "load_five", 0.114},
+        {"metrics/load", "load_one", 0.226},
+        {"metrics/memory", "mem_buffers", 129384.0},
+        {"metrics/memory", "mem_cached", 589576.0},
+        {"metrics/memory", "mem_free", 1365496.0},
+        {"metrics/memory", "mem_shared", 0.0},
+        {"metrics/memory", "mem_total", 4055144.0},
+        {"metrics/memory", "swap_free", 4128760.0},
+        {"metrics/memory", "swap_total", 4128760.0},
+        {"metrics/network", "bytes_in", 22547.48},
+        {"metrics/network", "bytes_out", 5772.33},
+        {"metrics/network", "pkts_in", 24.0},
+        {"metrics/network", "pkts_out", 35.4},
+        {"metrics/process", "proc_run", 4.0},
+        {"metrics/process", "proc_total", 657.0},
+        {"metrics/dfs/journalNode", "batchesWritten", 0.0},
+        {"metrics/dfs/journalNode", "batchesWrittenWhileLagging", 0.0},
+        {"metrics/dfs/journalNode", "bytesWritten", 0.0},
+        {"metrics/dfs/journalNode", "currentLagTxns", 0.0},
+        {"metrics/dfs/journalNode", "lastPromisedEpoch", 5.0},
+        {"metrics/dfs/journalNode", "lastWriterEpoch", 5.0},
+        {"metrics/dfs/journalNode", "lastWrittenTxId", 613.0},
+        {"metrics/dfs/journalNode", "syncs60s50thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs60s75thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs60s90thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs60s95thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs60s99thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs60s_num_ops", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s50thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s75thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s90thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s95thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s99thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs300s_num_ops", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s50thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s75thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s90thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s95thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s99thPercentileLatencyMicros", 0.0},
+        {"metrics/dfs/journalNode", "syncs3600s_num_ops", 0.0},
+        {"metrics/dfs/journalNode", "txnsWritten", 0.0}
     };
 
     Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
@@ -940,29 +961,31 @@ public class StackDefinedPropertyProviderTest {
     for (String property : properties) {
       Assert.assertEquals(testData[i++][2], resource.getPropertyValue(property));
     }
-  }  
+  }
 
   @Test
   public void testPopulateResources_resourcemanager_clustermetrics() throws Exception {
-    
+
     String[] metrics = new String[] {
-      "metrics/yarn/ClusterMetrics/NumActiveNMs",
-      "metrics/yarn/ClusterMetrics/NumDecommissionedNMs",
-      "metrics/yarn/ClusterMetrics/NumLostNMs",
-      "metrics/yarn/ClusterMetrics/NumUnhealthyNMs",
-      "metrics/yarn/ClusterMetrics/NumRebootedNMs"
+        "metrics/yarn/ClusterMetrics/NumActiveNMs",
+        "metrics/yarn/ClusterMetrics/NumDecommissionedNMs",
+        "metrics/yarn/ClusterMetrics/NumLostNMs",
+        "metrics/yarn/ClusterMetrics/NumUnhealthyNMs",
+        "metrics/yarn/ClusterMetrics/NumRebootedNMs"
     };
-    
+
     org.apache.ambari.server.controller.ganglia.TestStreamProvider streamProvider =
         new org.apache.ambari.server.controller.ganglia.TestStreamProvider("yarn_ganglia_data.txt");
 
-    TestJMXHostProvider jmxHostProvider = new TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestJMXHostProvider jmxHostProvider = new JMXPropertyProviderTest.TestJMXHostProvider(true);
+    JMXPropertyProviderTest.TestMetricsHostProvider metricsHostProvider = new JMXPropertyProviderTest.TestMetricsHostProvider();
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-    
+
     StackDefinedPropertyProvider propertyProvider = new StackDefinedPropertyProvider(
         Resource.Type.HostComponent,
         jmxHostProvider,
         hostProvider,
+        metricsHostProvider,
         streamProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
@@ -977,19 +1000,19 @@ public class StackDefinedPropertyProviderTest {
       resource.setProperty("HostRoles/cluster_name", "c1");
       resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "ip-10-39-113-33.ec2.internal");
       resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "RESOURCEMANAGER");
-      
+
       // only ask for one property
       Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
       temporalInfoMap.put(metric, new TemporalInfoImpl(10L, 20L, 1L));
       Request  request = PropertyHelper.getReadRequest(Collections.singleton(metric), temporalInfoMap);
 
       Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-      
+
       Assert.assertEquals(4, PropertyHelper.getProperties(resource).size());
       Assert.assertNotNull(resource.getPropertyValue(metric));
-      
+
     }
-    
-  }  
-  
+
+  }
+
 }

+ 45 - 36
ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java → ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java

@@ -16,9 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.ambari.server.controller.jmx;
+package org.apache.ambari.server.controller.metrics;
 
 import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.jmx.JMXHostProvider;
+import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
+import org.apache.ambari.server.controller.jmx.TestStreamProvider;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
@@ -27,11 +30,7 @@ import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
 
 /**
@@ -48,16 +47,17 @@ public class JMXPropertyProviderTest {
   public void testPopulateResources() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     // namenode
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
@@ -71,7 +71,7 @@ public class JMXPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-0e-34-e1.compute-1.internal", "50070", "/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/hdfs_namenode_jmx.json for values
     Assert.assertEquals(13670605,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
@@ -96,7 +96,7 @@ public class JMXPropertyProviderTest {
 
     propertyProvider.populateResources(Collections.singleton(resource), request, null);
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50075"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-14-ee-b3.compute-1.internal", "50075", "/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/hdfs_datanode_jmx.json for values
     Assert.assertEquals(856,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
@@ -130,7 +130,7 @@ public class JMXPropertyProviderTest {
 
     propertyProvider.populateResources(Collections.singleton(resource), request, null);
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50030"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-14-ee-b3.compute-1.internal", "50030", "/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/mapreduce_jobtracker_jmx.json for values
     Assert.assertEquals(13, PropertyHelper.getProperties(resource).size());
@@ -177,7 +177,7 @@ public class JMXPropertyProviderTest {
 
     propertyProvider.populateResources(Collections.singleton(resource), request, null);
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50060"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-14-ee-b3.compute-1.internal", "50060", "/jmx"), streamProvider.getLastSpec());
 
     Assert.assertEquals(18, PropertyHelper.getProperties(resource).size());
     Assert.assertEquals(954466304, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
@@ -218,7 +218,7 @@ public class JMXPropertyProviderTest {
 
     propertyProvider.populateResources(Collections.singleton(resource), request, null);
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "60010"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-14-ee-b3.compute-1.internal", "60010", "/jmx"), streamProvider.getLastSpec());
 
     Assert.assertEquals(8, PropertyHelper.getProperties(resource).size());
     Assert.assertEquals(1069416448, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
@@ -234,16 +234,17 @@ public class JMXPropertyProviderTest {
   public void testPopulateResources_singleProperty() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     // namenode
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
@@ -258,7 +259,7 @@ public class JMXPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http", "domu-12-31-39-0e-34-e1.compute-1.internal", "50070", "/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/hdfs_namenode_jmx.json for values
     Assert.assertEquals(13670605,  resource.getPropertyValue("metrics/rpc/ReceivedBytes"));
@@ -269,16 +270,17 @@ public class JMXPropertyProviderTest {
   public void testPopulateResources_category() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     // namenode
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
@@ -294,7 +296,7 @@ public class JMXPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http","domu-12-31-39-0e-34-e1.compute-1.internal", "50070","/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/hdfs_namenode_jmx.json for values
     Assert.assertEquals(184320,  resource.getPropertyValue("metrics/dfs/FSNamesystem/CapacityUsed"));
@@ -306,16 +308,17 @@ public class JMXPropertyProviderTest {
   public void testPopulateResourcesWithUnknownPort() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     // namenode
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
@@ -328,7 +331,7 @@ public class JMXPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
+    Assert.assertEquals(propertyProvider.getSpec("http","domu-12-31-39-0e-34-e1.compute-1.internal", "50070","/jmx"), streamProvider.getLastSpec());
 
     // see test/resources/hdfs_namenode_jmx.json for values
     Assert.assertEquals(13670605,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
@@ -343,16 +346,17 @@ public class JMXPropertyProviderTest {
   public void testPopulateResourcesUnhealthyResource() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     // namenode
     Resource resource = new ResourceImpl(Resource.Type.HostComponent);
@@ -375,17 +379,18 @@ public class JMXPropertyProviderTest {
     // Set the provider to take 50 millis to return the JMX values
     TestStreamProvider  streamProvider = new TestStreamProvider(50L);
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
     Set<Resource> resources = new HashSet<Resource>();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
         PropertyHelper.getPropertyId("HostRoles", "host_name"),
         PropertyHelper.getPropertyId("HostRoles", "component_name"),
-        PropertyHelper.getPropertyId("HostRoles", "state"),
-        Collections.singleton("STARTED"));
+        PropertyHelper.getPropertyId("HostRoles", "state"));
 
     for (int i = 0; i < NUMBER_OF_RESOURCES; ++i) {
       // datanode
@@ -420,17 +425,18 @@ public class JMXPropertyProviderTest {
     // Set the provider to take 100 millis to return the JMX values
     TestStreamProvider  streamProvider = new TestStreamProvider(100L);
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
+    TestMetricsHostProvider metricsHostProvider = new TestMetricsHostProvider();
     Set<Resource> resources = new HashSet<Resource>();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
         PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
         streamProvider,
         hostProvider,
+        metricsHostProvider,
         "HostRoles/cluster_name",
         "HostRoles/host_name",
         "HostRoles/component_name",
-        "HostRoles/state",
-        Collections.singleton("STARTED"));
+        "HostRoles/state");
 
     // set the provider timeout to 50 millis
     propertyProvider.setPopulateTimeout(50L);
@@ -468,11 +474,6 @@ public class JMXPropertyProviderTest {
       this.unknownPort = unknownPort;
     }
 
-    @Override
-    public String getHostName(String clusterName, String componentName) {
-      return null;
-    }
-
     @Override
     public Set<String> getHostNames(String clusterName, String componentName) {
       return null;
@@ -480,12 +481,12 @@ public class JMXPropertyProviderTest {
 
     @Override
     public String getPort(String clusterName, String componentName) throws
-      SystemException {
+        SystemException {
 
       if (unknownPort) {
         return null;
       }
-      
+
       if (componentName.equals("NAMENODE"))
         return "50070";
       else if (componentName.equals("DATANODE"))
@@ -508,6 +509,14 @@ public class JMXPropertyProviderTest {
     public String getJMXProtocol(String clusterName, String componentName) {
       return "http";
     }
-    
+
+  }
+
+  public static class TestMetricsHostProvider implements MetricsHostProvider {
+
+    @Override
+    public String getHostName(String clusterName, String componentName) {
+      return null;
+    }
   }
 }

+ 114 - 9
ambari-server/src/test/python/TestAmbariServer.py

@@ -4831,17 +4831,19 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     get_ambari_properties_mock.return_value = properties
     get_validated_string_input_mock.side_effect = ['admin', 'admin']
 
-    u = MagicMock()
-    u.getcode.side_effect = [201, 200, 200]
-    u.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
+    response = MagicMock()
+    response.getcode.side_effect = [201, 200, 200]
+    response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
                           '{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
                           '{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
 
-    urlopen_mock.return_value = u
+    urlopen_mock.return_value = response
 
     ambari_server.sync_ldap()
-    pass
 
+    self.assertTrue(response.getcode.called)
+    self.assertTrue(response.read.called)
+    pass
 
   @patch("urllib2.urlopen")
   @patch.object(ambari_server, "get_validated_string_input")
@@ -4858,12 +4860,12 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     get_ambari_properties_mock.return_value = properties
     get_validated_string_input_mock.side_effect = ['admin', 'admin']
 
-    u = MagicMock()
-    u.getcode.side_effect = [201, 200]
-    u.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
+    response = MagicMock()
+    response.getcode.side_effect = [201, 200]
+    response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
                           '{"Event":{"status" : "ERROR","status_detail" : "Error!!","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}']
 
-    urlopen_mock.return_value = u
+    urlopen_mock.return_value = response
 
     try:
       ambari_server.sync_ldap()
@@ -4871,6 +4873,109 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV
     except FatalException as e:
       pass
 
+  @patch("urllib2.urlopen")
+  @patch("urllib2.Request")
+  @patch("base64.encodestring")
+  @patch.object(ambari_server, 'is_root')
+  @patch.object(ambari_server, 'is_server_runing')
+  @patch.object(ambari_server, 'get_ambari_properties')
+  @patch.object(ambari_server, 'get_validated_string_input')
+  def test_sync_ldap_forbidden(self, get_validated_string_input_method, get_ambari_properties_method,
+                                is_server_runing_method, is_root_method,
+                                encodestring_method, request_constructor, urlopen_method):
+
+    is_root_method.return_value = False
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if not root")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("root-level" in fe.reason)
+      pass
+    is_root_method.return_value = True
+
+    is_server_runing_method.return_value = (None, None)
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if ambari is stopped")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("not running" in fe.reason)
+      pass
+    is_server_runing_method.return_value = (True, None)
+
+    configs = MagicMock()
+    configs.get_property.return_value = None
+    get_ambari_properties_method.return_value = configs
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if ldap is not configured")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("not configured" in fe.reason)
+      pass
+    configs.get_property.return_value = 'true'
+
+    get_validated_string_input_method.return_value = 'admin'
+    encodestring_method.return_value = 'qwe123'
+
+    requestMocks = [MagicMock()]
+    request_constructor.side_effect = requestMocks
+    response = MagicMock()
+    response.getcode.return_value = 403
+    urlopen_method.return_value = response
+
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if return code != 200")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("status code" in fe.reason)
+      pass
+
+  @patch.object(ambari_server, 'is_root')
+  def test_sync_ldap_ambari_stopped(self, is_root_method):
+    is_root_method.return_value = False
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if not root")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("root-level" in fe.reason)
+      pass
+
+  @patch.object(ambari_server, 'is_root')
+  @patch.object(ambari_server, 'is_server_runing')
+  def test_sync_ldap_ambari_stopped(self, is_server_runing_method, is_root_method):
+    is_root_method.return_value = True
+    is_server_runing_method.return_value = (None, None)
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if ambari is stopped")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("not running" in fe.reason)
+      pass
+
+  @patch.object(ambari_server, 'is_root')
+  @patch.object(ambari_server, 'is_server_runing')
+  @patch.object(ambari_server, 'get_ambari_properties')
+  def test_sync_ldap_not_configured(self, get_ambari_properties_method,
+                     is_server_runing_method, is_root_method):
+    is_root_method.return_value = True
+    is_server_runing_method.return_value = (True, None)
+
+    configs = MagicMock()
+    configs.get_property.return_value = None
+    get_ambari_properties_method.return_value = configs
+    try:
+      ambari_server.sync_ldap()
+      self.fail("Should throw exception if ldap is not configured")
+    except FatalException as fe:
+      # Expected
+      self.assertTrue("not configured" in fe.reason)
+      pass
+
   @patch.object(ambari_server, 'read_password')
   def test_configure_ldap_password(self, read_password_method):
     out = StringIO.StringIO()

+ 2 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/default.json

@@ -371,7 +371,8 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
         },
         "cluster-env": {
             "security_enabled": "false",

+ 2 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/secured.json

@@ -557,7 +557,8 @@
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
         }, 
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 

+ 1 - 5
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -103,11 +103,7 @@ class TestHBaseMaster(RMFTestCase):
                               content = StaticFile('draining_servers.rb'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
-                              logoutput = True,
-                              user = 'hbase',
-                              )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
                               logoutput = True,
                               user = 'hbase',
                               )

+ 136 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py

@@ -18,6 +18,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 from ambari_commons import OSCheck
+import json
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *
 
@@ -56,6 +57,7 @@ class TestDatanode(RMFTestCase):
                               )
     self.assertNoMoreResources()
 
+  @patch("os.path.exists", new = MagicMock(return_value=False))
   def test_stop_default(self):
     self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
                        classname = "DataNode",
@@ -115,6 +117,70 @@ class TestDatanode(RMFTestCase):
                               )
     self.assertNoMoreResources()
 
+  def test_start_secured_HDP22_root(self):
+    config_file = "stacks/2.0.6/configs/secured.json"
+    with open(config_file, "r") as f:
+      secured_json = json.load(f)
+
+    secured_json['hostLevelParams']['stack_version']= '2.2'
+
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_dict = secured_json
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_SECURE_DN_PID_DIR=/var/run/hadoop/hdfs && export HADOOP_SECURE_DN_LOG_DIR=/var/log/hadoop/hdfs && export HADOOP_SECURE_DN_USER=hdfs && export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_secured_HDP22_non_root_https_only(self):
+    config_file="stacks/2.0.6/configs/secured.json"
+    with open(config_file, "r") as f:
+      secured_json = json.load(f)
+
+    secured_json['hostLevelParams']['stack_version']= '2.2'
+    secured_json['configurations']['hdfs-site']['dfs.http.policy']= 'HTTPS_ONLY'
+    secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
+    secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'
+
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_dict = secured_json
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertNoMoreResources()
+
+  @patch("os.path.exists", new = MagicMock(return_value=False))
   def test_stop_secured(self):
     self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
                        classname = "DataNode",
@@ -141,6 +207,76 @@ class TestDatanode(RMFTestCase):
                               )
     self.assertNoMoreResources()
 
+
+  @patch("os.path.exists", new = MagicMock(return_value=False))
+  def test_stop_secured_HDP22_root(self):
+    config_file = "stacks/2.0.6/configs/secured.json"
+    with open(config_file, "r") as f:
+      secured_json = json.load(f)
+
+    secured_json['hostLevelParams']['stack_version']= '2.2'
+
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_dict = secured_json
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_SECURE_DN_PID_DIR=/var/run/hadoop/hdfs && export HADOOP_SECURE_DN_LOG_DIR=/var/log/hadoop/hdfs && export HADOOP_SECURE_DN_USER=hdfs && export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
+                              not_if = None,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              )
+    self.assertNoMoreResources()
+
+  @patch("os.path.exists", new = MagicMock(return_value=False))
+  def test_stop_secured_HDP22_non_root_https_only(self):
+    config_file = "stacks/2.0.6/configs/secured.json"
+    with open(config_file, "r") as f:
+      secured_json = json.load(f)
+
+    secured_json['hostLevelParams']['stack_version']= '2.2'
+    secured_json['configurations']['hdfs-site']['dfs.http.policy']= 'HTTPS_ONLY'
+    secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
+    secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'
+
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_dict = secured_json
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
+                              not_if=None,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action=['delete'],
+                              )
+    self.assertNoMoreResources()
+
   def assert_configure_default(self):
     self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
                               content = Template('hdfs.conf.j2'),

+ 8 - 8
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py

@@ -94,10 +94,6 @@ class TestHiveClient(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
-    self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
-        environment = {'no_proxy': 'c6401.ambari.apache.org'},
-        not_if = '[ -f DBConnectionVerification.jar]',
-    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
                               group = 'hadoop',
                               conf_dir = '/etc/hive/conf',
@@ -111,6 +107,10 @@ class TestHiveClient(RMFTestCase):
                               owner = 'hive',
                               group = 'hadoop',
                               )
+    self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
+        environment = {'no_proxy': 'c6401.ambari.apache.org'},
+        not_if = '[ -f DBConnectionVerification.jar]',
+    )
     self.assertNoMoreResources()
 
 
@@ -187,10 +187,6 @@ class TestHiveClient(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
-    self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
-        environment = {'no_proxy': 'c6401.ambari.apache.org'},
-        not_if = '[ -f DBConnectionVerification.jar]',
-    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
                               group = 'hadoop',
                               conf_dir = '/etc/hive/conf',
@@ -204,4 +200,8 @@ class TestHiveClient(RMFTestCase):
                               owner = 'hive',
                               group = 'hadoop',
                               )
+    self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
+        environment = {'no_proxy': 'c6401.ambari.apache.org'},
+        not_if = '[ -f DBConnectionVerification.jar]',
+    )
     self.assertNoMoreResources()

+ 26 - 26
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py

@@ -173,6 +173,19 @@ class TestHiveMetastore(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin" + os.pathsep + "/usr/bin"},
@@ -205,19 +218,6 @@ class TestHiveMetastore(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )
 
   def assert_configure_secured(self):
     self.assertResourceCalled('Directory', '/etc/hive/conf.server',
@@ -286,6 +286,19 @@ class TestHiveMetastore(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
@@ -318,16 +331,3 @@ class TestHiveMetastore(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )

+ 26 - 26
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -282,6 +282,19 @@ class TestHiveServer(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
@@ -314,19 +327,6 @@ class TestHiveServer(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )
 
   def assert_configure_secured(self):
     self.assertResourceCalled('HdfsDirectory', '/apps/hive/warehouse',
@@ -426,6 +426,19 @@ class TestHiveServer(RMFTestCase):
         group = 'hadoop',
         mode = 0644,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
@@ -458,19 +471,6 @@ class TestHiveServer(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )
 
   @patch("hive_service.check_fs_root")
   @patch("time.time")

+ 0 - 30
ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py

@@ -199,26 +199,6 @@ class TestHistoryServer(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -396,16 +376,6 @@ class TestHistoryServer(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,

+ 0 - 30
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py

@@ -54,26 +54,6 @@ class TestMapReduce2Client(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -192,16 +172,6 @@ class TestMapReduce2Client(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,

+ 30 - 30
ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py

@@ -176,6 +176,26 @@ class TestNodeManager(RMFTestCase):
                               bin_dir = '/usr/bin',
                               action = ['create'],
                               )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -196,26 +216,6 @@ class TestNodeManager(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -373,6 +373,16 @@ class TestNodeManager(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               action = ['create'],
                               )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              owner = 'yarn',
+                              recursive = True,
+                              ignore_failures = True,
+                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -393,16 +403,6 @@ class TestNodeManager(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,

+ 0 - 30
ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py

@@ -169,26 +169,6 @@ class TestResourceManager(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -305,16 +285,6 @@ class TestResourceManager(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,

+ 0 - 50
ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py

@@ -55,26 +55,6 @@ class TestYarnClient(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -193,16 +173,6 @@ class TestYarnClient(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,
@@ -338,26 +308,6 @@ class TestYarnClient(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log1',
-      owner = 'yarn',
-      recursive = True,
-      ignore_failures = True,
-    )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
       owner = 'yarn',
       recursive = True,

+ 2 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/default.json

@@ -436,7 +436,8 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

+ 2 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/secured.json

@@ -491,7 +491,8 @@
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
             "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 

+ 2 - 0
ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py

@@ -31,6 +31,7 @@ class TestFalconServer(RMFTestCase):
     )
     self.assert_configure_default()
     self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-start -port 15000',
+                              path = ['/usr/bin'],
                               user = 'falcon',
                               )
     self.assertNoMoreResources()
@@ -42,6 +43,7 @@ class TestFalconServer(RMFTestCase):
                        config_file="default.json"
     )
     self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-stop',
+                              path = ['/usr/bin'],
                               user = 'falcon',
                               )
     self.assertResourceCalled('File', '/var/run/falcon/falcon.pid',

+ 26 - 26
ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py

@@ -151,6 +151,19 @@ class TestHiveMetastore(RMFTestCase):
         owner = 'hive',
         group = 'hadoop',
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
@@ -186,19 +199,6 @@ class TestHiveMetastore(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )
 
   def assert_configure_secured(self):
     self.assertResourceCalled('Directory', '/etc/hive/conf.server',
@@ -243,6 +243,19 @@ class TestHiveMetastore(RMFTestCase):
         owner = 'hive',
         group = 'hadoop',
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hive/conf.server',
+                              mode = 0644,
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
+                              owner = 'hive',
+                              configurations = self.getConfig()['configurations']['hive-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+                              owner = 'hive',
+                              group = 'hadoop',
+                              )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
@@ -278,16 +291,3 @@ class TestHiveMetastore(RMFTestCase):
         mode = 0755,
         recursive = True,
     )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hive/conf.server',
-                              mode = 0644,
-                              configuration_attributes = self.getConfig()['configuration_attributes']['hive-site'],
-                              owner = 'hive',
-                              configurations = self.getConfig()['configurations']['hive-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-                              content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
-                              owner = 'hive',
-                              group = 'hadoop',
-                              )

Неке датотеке нису приказане због велике количине промена