Browse source

Merge branch 'trunk' into branch-alerts-dev

Nate Cole 11 years ago
parent
commit
e508fe5a33
100 changed files with 1,447 additions and 610 deletions
  1. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/NavbarCtrl.js (+40 -1)
  2. ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js (+12 -2)
  3. ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css (+18 -0)
  4. ambari-admin/src/main/resources/ui/admin-web/app/views/leftNavbar.html (+36 -3)
  5. ambari-agent/conf/unix/install-helper.sh (+6 -0)
  6. ambari-agent/pom.xml (+1 -1)
  7. ambari-agent/src/main/package/rpm/posttrans_agent.sh (+7 -0)
  8. ambari-client/groovy-client/src/main/resources/blueprints/hdp-multinode-default (+1 -4)
  9. ambari-client/groovy-client/src/main/resources/blueprints/hdp-singlenode-default (+1 -1)
  10. ambari-client/groovy-client/src/main/resources/blueprints/lambda-architecture (+3 -9)
  11. ambari-client/groovy-client/src/main/resources/blueprints/multi-node-hdfs-yarn (+3 -3)
  12. ambari-client/groovy-client/src/main/resources/blueprints/single-node-hdfs-yarn (+1 -1)
  13. ambari-client/groovy-client/src/main/resources/blueprints/warmup (+4 -4)
  14. ambari-server/conf/unix/install-helper.sh (+6 -0)
  15. ambari-server/pom.xml (+25 -0)
  16. ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java (+1 -7)
  17. ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java (+36 -0)
  18. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java (+19 -5)
  19. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java (+1 -9)
  20. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java (+5 -0)
  21. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewVersionResourceProvider.java (+14 -8)
  22. ambari-server/src/main/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProvider.java (+6 -2)
  23. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java (+49 -6)
  24. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java (+11 -7)
  25. ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsService.java (+2 -2)
  26. ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java (+132 -68)
  27. ambari-server/src/main/package/rpm/posttrans_server.sh (+7 -0)
  28. ambari-server/src/main/resources/stacks/HDP/1.3.2/configuration/cluster-env.xml (+5 -0)
  29. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py (+2 -3)
  30. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py (+1 -2)
  31. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py (+5 -6)
  32. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py (+11 -5)
  33. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py (+62 -59)
  34. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py (+1 -1)
  35. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py (+5 -6)
  36. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml (+1 -31)
  37. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py (+4 -5)
  38. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py (+24 -7)
  39. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py (+4 -5)
  40. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py (+4 -5)
  41. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py (+2 -3)
  42. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py (+4 -5)
  43. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py (+4 -5)
  44. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py (+5 -6)
  45. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py (+4 -5)
  46. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py (+4 -5)
  47. ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml (+5 -0)
  48. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py (+5 -3)
  49. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py (+25 -23)
  50. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py (+1 -2)
  51. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py (+5 -6)
  52. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py (+7 -5)
  53. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py (+63 -61)
  54. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py (+1 -1)
  55. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py (+1 -1)
  56. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py (+5 -5)
  57. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml (+0 -27)
  58. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py (+4 -5)
  59. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py (+33 -9)
  60. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py (+4 -5)
  61. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py (+32 -30)
  62. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py (+4 -5)
  63. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py (+4 -5)
  64. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py (+4 -6)
  65. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py (+4 -5)
  66. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-site.xml (+1 -1)
  67. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/application_timeline_server.py (+2 -2)
  68. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py (+2 -2)
  69. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py (+13 -5)
  70. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py (+18 -12)
  71. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py (+1 -1)
  72. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml (+1 -0)
  73. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py (+4 -5)
  74. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py (+12 -7)
  75. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json (+2 -5)
  76. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json (+2 -2)
  77. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py (+4 -5)
  78. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py (+3 -4)
  79. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py (+1 -1)
  80. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py (+4 -5)
  81. ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json (+2 -5)
  82. ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json (+2 -2)
  83. ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py (+4 -5)
  84. ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py (+3 -4)
  85. ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/configuration/tez-site.xml (+1 -1)
  86. ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py (+1 -1)
  87. ambari-server/src/main/resources/stacks/HDP/2.2.1/services/YARN/metainfo.xml (+8 -0)
  88. ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java (+27 -0)
  89. ambari-server/src/test/java/org/apache/ambari/server/configuration/ConfigurationTest.java (+15 -4)
  90. ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java (+86 -0)
  91. ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProviderTest.java (+4 -2)
  92. ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java (+170 -5)
  93. ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProviderTest.java (+4 -2)
  94. ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java (+2 -0)
  95. ambari-server/src/test/java/org/apache/ambari/server/orm/entities/ViewEntityTest.java (+29 -1)
  96. ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java (+97 -0)
  97. ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestAmbariLdapAuthoritiesPopulator.java (+2 -0)
  98. ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java (+42 -10)
  99. ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py (+36 -0)
  100. ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py (+36 -0)

+ 40 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/NavbarCtrl.js

@@ -20,12 +20,51 @@
 angular.module('ambariAdminConsole')
 .controller('NavbarCtrl',['$scope', 'Cluster', '$location', 'uiAlert', 'ROUTES', 'LDAP', 'ConfirmationModal', '$rootScope', function($scope, Cluster, $location, uiAlert, ROUTES, LDAP, ConfirmationModal, $rootScope) {
   $scope.cluster = null;
+  $scope.editCluster = {
+    name        : '',
+    editingName : false
+  };
+
   Cluster.getStatus().then(function(cluster) {
     $scope.cluster = cluster;
   }).catch(function(data) {
   	uiAlert.danger(data.status, data.message);
   });

+  $scope.toggleEditName = function($event) {
+    if ($event && $event.keyCode !== 27) {
+      // 27 = Escape key
+      return false;
+    }
+
+    $scope.editCluster.name         = $scope.cluster.Clusters.cluster_name;
+    $scope.editCluster.editingName  = !$scope.editCluster.editingName;
+  };
+
+  $scope.confirmClusterNameChange = function() {
+    ConfirmationModal.show('Confirm Cluster Name Change', 'Are you sure you want to change the cluster name to ' + $scope.editCluster.name + '?')
+      .then(function() {
+        $scope.saveClusterName();
+      }).catch(function() {
+        // user clicked cancel
+        $scope.toggleEditName();
+      });
+  };
+
+  $scope.saveClusterName = function() {
+    var oldClusterName = $scope.cluster.Clusters.cluster_name,
+        newClusterName = $scope.editCluster.name;
+
+    Cluster.editName(oldClusterName, newClusterName).then(function(data) {
+      $scope.cluster.Clusters.cluster_name = newClusterName;
+      uiAlert.success('Success', 'The cluster has been renamed to ' + newClusterName + '.');
+    }).catch(function(data) {
+      uiAlert.danger(data.data.status, data.data.message);
+    });
+
+    $scope.toggleEditName();
+  };
+
   $scope.isActive = function(path) {
   	var route = ROUTES;
   	angular.forEach(path.split('.'), function(routeObj) {
@@ -54,4 +93,4 @@ angular.module('ambariAdminConsole')
       });
     });
   };
-}]);
+}]);

+ 12 - 2
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Cluster.js

@@ -48,8 +48,7 @@ angular.module('ambariAdminConsole')
         deferred.resolve(data.items);
       })
       .catch(function(data) {
-        deferred.reject(data);
-      });
+        deferred.reject(data); });

       return deferred.promise;
     },
@@ -103,6 +102,17 @@ angular.module('ambariAdminConsole')
           'PrivilegeInfo/permission_name': permissionName
         }
       });
+    },
+    editName: function(oldName, newName) {
+      return $http({
+        method: 'PUT',
+        url: Settings.baseUrl + '/clusters/' + oldName,
+        data: {
+          Clusters: {
+            "cluster_name": newName
+          }
+        }
+      });
     }
   };
 }]);
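The new editName service is a thin wrapper over a standard cluster update against the REST API. For reference, a minimal sketch of the equivalent request issued outside the browser; the endpoint shape and body come from the diff above, while the host, header, and cluster names are hypothetical placeholders:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Illustrative only: the same PUT that Cluster.editName(oldName, newName)
// sends via $http. Host and cluster names are made up.
public class RenameClusterExample {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://ambari.example.com:8080/api/v1/clusters/oldName");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    // Assumption: Ambari typically rejects mutating requests without this
    // header when CSRF protection is enabled.
    conn.setRequestProperty("X-Requested-By", "ambari");
    conn.setDoOutput(true);

    String body = "{\"Clusters\":{\"cluster_name\":\"newName\"}}";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}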

+ 18 - 0
ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css

@@ -736,3 +736,21 @@ input[type="submit"].btn.btn-mini {
 .breadcrumb > .active {
   color: #666;
 }
+
+.edit-cluster-name {
+  cursor: pointer;
+}
+
+.edit-cluster-name:hover {
+  color: #428bca;
+}
+
+.editClusterNameForm button.btn {
+  padding: 4px 8px;
+}
+
+.editClusterNameForm input {
+  width: 161px;
+  float: left;
+  margin-right: 5px;
+}

+ 36 - 3
ambari-admin/src/main/resources/ui/admin-web/app/views/leftNavbar.html

@@ -15,12 +15,45 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 -->
-<div class="left-navbar">
+<div class="left-navbar" xmlns="http://www.w3.org/1999/html">
   <div class="panel panel-default">
     <div class="panel-heading"><span class="glyphicon glyphicon-cloud"></span> Clusters</div>
     <div class="panel-body">
       <div ng-show="cluster">
-        <h5>{{cluster.Clusters.cluster_name}}</h5>
+        <div ng-switch on="editCluster.editingName">
+          <h5 ng-switch-when="false">{{cluster.Clusters.cluster_name}}
+            <i ng-click="toggleEditName()" class="glyphicon glyphicon-edit pull-right edit-cluster-name" tooltip="Rename Cluster"></i>
+          </h5>
+
+          <form ng-keyup="toggleEditName($event)" tabindex="1" name="editClusterNameForm" class="editClusterNameForm" ng-switch-when="true"
+                ng-submit="editCluster.name !== cluster.Clusters.cluster_name && editClusterNameForm.newClusterName.$valid && confirmClusterNameChange()">
+            <div class="form-group" ng-class="{'has-error': editClusterNameForm.newClusterName.$invalid && !editClusterNameForm.newClusterName.$pristine }">
+              <input
+                  autofocus
+                  type="text"
+                  name="newClusterName"
+                  ng-required="true"
+                  ng-pattern="/^[a-zA-Z0-9]*$/"
+                  ng-model="editCluster.name"
+                  class="form-control input-sm"
+                  tooltip="Only alpha-numeric characters."
+                  tooltip-trigger="focus">
+
+              <button
+                  type="submit"
+                  class="btn btn-success btn-xs"
+                  ng-class="{'disabled': editClusterNameForm.newClusterName.$invalid || editCluster.name == cluster.Clusters.cluster_name}">
+                <i class="glyphicon glyphicon-ok"></i>
+              </button>
+              <button ng-click="toggleEditName()"
+                      class="btn btn-danger btn-xs">
+                <i class="glyphicon glyphicon-remove"></i>
+              </button>
+            </div>
+          </form>
+
+        </div>
+
         <ul class="nav nav-pills nav-stacked">
           <li ng-class="{active: isActive('clusters.manageAccess')}">
             <a href="#/clusters/{{cluster.Clusters.cluster_name}}/manageAccess" class="permissions">Permissions</a>
@@ -69,4 +102,4 @@
   </div>

 </div>
-  
+  

+ 6 - 0
ambari-agent/conf/unix/install-helper.sh

@@ -20,10 +20,12 @@

 COMMON_DIR="/usr/lib/python2.6/site-packages/ambari_commons"
 RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
 OLD_COMMON_DIR="/usr/lib/python2.6/site-packages/common_functions"
 INSTALL_HELPER_SERVER="/var/lib/ambari-server/install-helper.sh"
 COMMON_DIR_AGENT="/usr/lib/ambari-agent/lib/ambari_commons"
 RESOURCE_MANAGEMENT_DIR_AGENT="/usr/lib/ambari-agent/lib/resource_management"
+JINJA_AGENT_DIR="/usr/lib/ambari-agent/lib/ambari_jinja2"

 PYTHON_WRAPER_TARGET="/usr/bin/ambari-python-wrap"
 PYTHON_WRAPER_SOURCE="/var/lib/ambari-agent/ambari-python-wrap"
@@ -38,6 +40,10 @@ do_install(){
   if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
     ln -s "$RESOURCE_MANAGEMENT_DIR_AGENT" "$RESOURCE_MANAGEMENT_DIR"
   fi
+  # setting jinja2 shared resource
+  if [ ! -d "$JINJA_DIR" ]; then
+    ln -s "$JINJA_AGENT_DIR" "$JINJA_DIR"
+  fi
   # setting python-wrapper script
   if [ ! -f "$PYTHON_WRAPER_TARGET" ]; then
     ln -s "$PYTHON_WRAPER_SOURCE" "$PYTHON_WRAPER_TARGET"

+ 1 - 1
ambari-agent/pom.xml

@@ -40,7 +40,7 @@
     <agent.install.dir>/usr/lib/python2.6/site-packages/ambari_agent</agent.install.dir>
     <ambari_commons.install.dir>/usr/lib/ambari-agent/lib/ambari_commons</ambari_commons.install.dir>
     <resource_management.install.dir>/usr/lib/ambari-agent/lib/resource_management</resource_management.install.dir>
-    <jinja.install.dir>/usr/lib/python2.6/site-packages/ambari_jinja2</jinja.install.dir>
+    <jinja.install.dir>/usr/lib/ambari-agent/lib/ambari_jinja2</jinja.install.dir>
     <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>

+ 7 - 0
ambari-agent/src/main/package/rpm/posttrans_agent.sh

@@ -16,6 +16,8 @@

 RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
 RESOURCE_MANAGEMENT_DIR_AGENT="/usr/lib/ambari-agent/lib/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
+JINJA_AGENT_DIR="/usr/lib/ambari-agent/lib/ambari_jinja2"

 # remove RESOURCE_MANAGEMENT_DIR if it's a directory
 if [ -d "$RESOURCE_MANAGEMENT_DIR" ]; then  # resource_management dir exists
@@ -28,4 +30,9 @@ if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
   ln -s "$RESOURCE_MANAGEMENT_DIR_AGENT" "$RESOURCE_MANAGEMENT_DIR"
 fi

+# setting jinja2 shared resource
+if [ ! -d "$JINJA_DIR" ]; then
+  ln -s "$JINJA_AGENT_DIR" "$JINJA_DIR"
+fi
+
 exit 0

+ 1 - 4
ambari-client/groovy-client/src/main/resources/blueprints/hdp-multinode-default

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
                 "nagios_contact" : "admin@localhost"
             }
             }
         }
         }
@@ -134,9 +134,6 @@
                 {
                     "name" : "NAGIOS_SERVER"
                 },
-                {
-                    "name" : "GANGLIA_SERVER"
-                },
                 {
                     "name" : "ZOOKEEPER_CLIENT"
                 },

+ 1 - 1
ambari-client/groovy-client/src/main/resources/blueprints/hdp-singlenode-default

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
                 "nagios_contact" : "admin@localhost"
             }
             }
         }
         }

+ 3 - 9
ambari-client/groovy-client/src/main/resources/blueprints/lambda-architecture

@@ -1,9 +1,9 @@
 {
   "configurations": [
     {
-      "global": {
-        "nagios_contact": "me@my-awesome-domain.example"
-      }
+        "nagios-env" : {
+            "nagios_contact" : "admin@localhost"
+        }
     }
   ],
   "host_groups": [
@@ -100,9 +100,6 @@
         {
           "name": "YARN_CLIENT"
         },
-        {
-          "name" : "APP_TIMELINE_SERVER"
-        },
         {
           "name": "MAPREDUCE2_CLIENT"
         },
@@ -157,9 +154,6 @@
         {
           "name": "DATANODE"
         },
-        {
-          "name" : "APP_TIMELINE_SERVER"
-        },
         {
           "name": "GANGLIA_MONITOR"
         }

+ 3 - 3
ambari-client/groovy-client/src/main/resources/blueprints/multi-node-hdfs-yarn

@@ -1,9 +1,9 @@
 {
   "configurations": [
     {
-      "global": {
-        "nagios_contact": "me@my-awesome-domain.example"
-      }
+        "nagios-env" : {
+            "nagios_contact" : "admin@localhost"
+        }
     }
   ],
   "host_groups": [

+ 1 - 1
ambari-client/groovy-client/src/main/resources/blueprints/single-node-hdfs-yarn

@@ -8,7 +8,7 @@
       },
       {
         "name" : "SECONDARY_NAMENODE"
-      },       
+      },
       {
         "name" : "DATANODE"
       },

+ 4 - 4
ambari-client/groovy-client/src/main/resources/blueprints/warmup

@@ -1,9 +1,9 @@
 {
   "configurations": [
     {
-      "global": {
-        "nagios_contact": "me@my-awesome-domain.example"
-      }
+        "nagios-env" : {
+            "nagios_contact" : "admin@localhost"
+        }
     }
   ],
   "host_groups": [
@@ -91,4 +91,4 @@
     "stack_name": "HDP",
     "stack_name": "HDP",
     "stack_version": "2.1"
     "stack_version": "2.1"
   }
   }
-}
+}

+ 6 - 0
ambari-server/conf/unix/install-helper.sh

@@ -19,10 +19,12 @@

 COMMON_DIR="/usr/lib/python2.6/site-packages/ambari_commons"
 RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
 OLD_COMMON_DIR="/usr/lib/python2.6/site-packages/common_functions"
 INSTALL_HELPER_AGENT="/var/lib/ambari-agent/install-helper.sh"
 COMMON_DIR_SERVER="/usr/lib/ambari-server/lib/ambari_commons"
 RESOURCE_MANAGEMENT_DIR_SERVER="/usr/lib/ambari-server/lib/resource_management"
+JINJA_SERVER_DIR="/usr/lib/ambari-server/lib/ambari_jinja2"

 PYTHON_WRAPER_TARGET="/usr/bin/ambari-python-wrap"
 PYTHON_WRAPER_SOURCE="/var/lib/ambari-server/ambari-python-wrap"
@@ -37,6 +39,10 @@ do_install(){
   if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
     ln -s "$RESOURCE_MANAGEMENT_DIR_SERVER" "$RESOURCE_MANAGEMENT_DIR"
   fi
+  # setting jinja2 shared resource
+  if [ ! -d "$JINJA_DIR" ]; then
+    ln -s "$JINJA_SERVER_DIR" "$JINJA_DIR"
+  fi
   # setting python-wrapper script
   if [ ! -f "$PYTHON_WRAPER_TARGET" ]; then
     ln -s "$PYTHON_WRAPER_SOURCE" "$PYTHON_WRAPER_TARGET"

+ 25 - 0
ambari-server/pom.xml

@@ -33,6 +33,7 @@
     <hdpLatestUrl>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</hdpLatestUrl>
     <ambari_commons.install.dir>/usr/lib/ambari-server/lib/ambari_commons</ambari_commons.install.dir>
     <resource_management.install.dir>/usr/lib/ambari-server/lib/resource_management</resource_management.install.dir>
+    <jinja.install.dir>/usr/lib/ambari-server/lib/ambari_jinja2</jinja.install.dir>
     <ambari-web-dir>${basedir}/../ambari-web/public</ambari-web-dir>
     <ambari-admin-dir>${basedir}/../ambari-admin</ambari-admin-dir>
     <contrib-views-dir>${basedir}/../contrib/views</contrib-views-dir>
@@ -282,6 +283,19 @@
                   </location>
                 </source>
               </sources>
+            </mapping>
+            <mapping>
+              <directory>${jinja.install.dir}</directory>
+              <username>root</username>
+              <groupname>root</groupname>
+              <sources>
+                <source>
+                  <location>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</location>
+                  <excludes>
+                    <exclude>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2/testsuite</exclude>
+                  </excludes>
+                </source>
+              </sources>
             </mapping>
               <mapping>
               <directory>/usr/sbin</directory>
@@ -922,6 +936,17 @@
                 <group>root</group>
               </mapper>
             </data>
+            <data>
+              <src>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</src>
+              <excludes>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2/testsuite</excludes>
+              <type>directory</type>
+              <mapper>
+                <type>perm</type>
+                <prefix>${jinja.install.dir}</prefix>
+                <user>root</user>
+                <group>root</group>
+              </mapper>
+            </data>
           </dataSet>
         </configuration>
       </plugin>

+ 1 - 7
ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java

@@ -334,13 +334,7 @@ public class StackExtensionHelper {
             child.getConfigDependencies() : parent.getConfigDependencies());


-//    HashSet downloadSource = child.getDownloadSource();
-//    if (downloadSource != null) {
-//      result.setDownloadSource(child.getDownloadSource());
-//    } else {
-//      result.setDownloadSource(parent.getDownloadSource());
-//    }
-//
+    //Merge client config file definitions
     List<ClientConfigFileDefinition> clientConfigFiles = child.getClientConfigFiles();
     if (clientConfigFiles != null) {
       result.setClientConfigFiles(child.getClientConfigFiles());

+ 36 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -302,6 +302,12 @@ public class Configuration {
   private static final String AGENT_THREADPOOL_SIZE_KEY = "agent.threadpool.size.max";
   private static final int AGENT_THREADPOOL_SIZE_DEFAULT = 25;

+  private static final String VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_KEY = "view.extraction.threadpool.size.max";
+  private static final int VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_DEFAULT = 20;
+  private static final String VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_KEY = "view.extraction.threadpool.size.core";
+  private static final int VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_DEFAULT = 10;
+  private static final String VIEW_EXTRACTION_THREADPOOL_TIMEOUT_KEY = "view.extraction.threadpool.timeout";
+  private static final long VIEW_EXTRACTION_THREADPOOL_TIMEOUT_DEFAULT = 100000L;

   private static final Logger LOG = LoggerFactory.getLogger(
       Configuration.class);
@@ -1031,4 +1037,34 @@ public class Configuration {
     return Integer.parseInt(properties.getProperty(
         AGENT_THREADPOOL_SIZE_KEY, String.valueOf(AGENT_THREADPOOL_SIZE_DEFAULT)));
   }
+
+  /**
+   * Get the view extraction thread pool max size.
+   *
+   * @return the view extraction thread pool max size
+   */
+  public int getViewExtractionThreadPoolMaxSize() {
+    return Integer.parseInt(properties.getProperty(
+        VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_DEFAULT)));
+  }
+
+  /**
+   * Get the view extraction thread pool core size.
+   *
+   * @return the view extraction thread pool core size
+   */
+  public int getViewExtractionThreadPoolCoreSize() {
+    return Integer.parseInt(properties.getProperty(
+        VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_DEFAULT)));
+  }
+
+  /**
+   * Get the view extraction thread pool timeout.
+   *
+   * @return the view extraction thread pool timeout
+   */
+  public long getViewExtractionThreadPoolTimeout() {
+    return Long.parseLong(properties.getProperty(
+        VIEW_EXTRACTION_THREADPOOL_TIMEOUT_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_TIMEOUT_DEFAULT)));
+  }
 }
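These three getters feed the new view-extraction thread pool introduced in ViewRegistry (see the ThreadPoolExecutor imports and the getExecutorService(configuration) call further down). The helper's body is not part of this excerpt, so the following is only a plausible sketch of how the settings could be wired together:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Plausible wiring only: the real getExecutorService(Configuration) is not
// shown in this diff. Defaults above: core 10, max 20, keep-alive 100000 ms.
class ViewExtractionPool {
  static ExecutorService create(Configuration configuration) {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        configuration.getViewExtractionThreadPoolCoreSize(),
        configuration.getViewExtractionThreadPoolMaxSize(),
        configuration.getViewExtractionThreadPoolTimeout(),
        TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<Runnable>());
    // With an unbounded queue the pool effectively stays at core size;
    // letting core threads time out releases them once extraction finishes.
    executor.allowCoreThreadTimeOut(true);
    return executor;
  }
}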

+ 19 - 5
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -1144,17 +1144,23 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   private synchronized RequestStatusResponse updateCluster(ClusterRequest request)
       throws AmbariException {

-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      throw new IllegalArgumentException("Invalid arguments, cluster name"
-          + " should not be null");
+    if (request.getClusterId() == null
+        && (request.getClusterName() == null
+        || request.getClusterName().isEmpty())) {
+      throw new IllegalArgumentException("Invalid arguments, cluster id or cluster name should not be null");
     }

     LOG.info("Received a updateCluster request"
+        + ", clusterId=" + request.getClusterId()
         + ", clusterName=" + request.getClusterName()
         + ", clusterName=" + request.getClusterName()
         + ", request=" + request);
         + ", request=" + request);
 
 
-    final Cluster cluster = clusters.getCluster(request.getClusterName());
+    final Cluster cluster;
+    if (request.getClusterId() == null) {
+      cluster = clusters.getCluster(request.getClusterName());
+    } else {
+      cluster = clusters.getClusterById(request.getClusterId());
+    }
     //save data to return configurations created
     List<ConfigurationResponse> configurationResponses =
       new LinkedList<ConfigurationResponse>();
@@ -1166,6 +1172,14 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       throw new IllegalArgumentException(msg);
     }

+    // set the new name of the cluster if change is requested
+    if (!cluster.getClusterName().equals(request.getClusterName())) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Received cluster name change request from " + cluster.getClusterName() + " to " + request.getClusterName());
+      }
+      cluster.setClusterName(request.getClusterName());
+    }
+
     // set or create configuration mapping (and optionally create the map of properties)
     if (null != request.getDesiredConfig()) {
       Set<Config> configs = new HashSet<Config>();

+ 1 - 9
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -71,7 +71,6 @@ import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.ResourceDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.resources.ResourceManager;
 import org.apache.ambari.server.resources.api.rest.GetResource;
 import org.apache.ambari.server.scheduler.ExecutionScheduleManager;
@@ -91,7 +90,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.ambari.server.utils.VersionUtils;
 import org.apache.ambari.server.view.ViewRegistry;
-import org.apache.ambari.view.SystemException;
 import org.eclipse.jetty.server.Connector;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.nio.SelectChannelConnector;
@@ -308,13 +306,7 @@ public class AmbariServer {
       root.addServlet(sh, "/api/v1/*");
       sh.setInitOrder(2);

-      try {
-        for (ViewInstanceEntity entity : viewRegistry.readViewArchives(configs)){
-          handlerList.addViewInstance(entity);
-        }
-      } catch (SystemException e) {
-        LOG.error("Caught exception deploying views.", e);
-      }
+      viewRegistry.readViewArchives();

       handlerList.addHandler(root);


+ 5 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java

@@ -201,6 +201,11 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         }
       }

+      // Hack - Remove passwords from configs
+      if (configurations.get(Configuration.HIVE_CONFIG_TAG)!=null) {
+        configurations.get(Configuration.HIVE_CONFIG_TAG).remove(Configuration.HIVE_METASTORE_PASSWORD_PROPERTY);
+      }
+
       Map<String, Set<String>> clusterHostInfo = null;
       ServiceInfo serviceInfo = null;
       String osFamily = null;

+ 14 - 8
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewVersionResourceProvider.java

@@ -44,14 +44,16 @@ public class ViewVersionResourceProvider extends AbstractResourceProvider {
   /**
    * View property id constants.
    */
-  public static final String VIEW_NAME_PROPERTY_ID      = "ViewVersionInfo/view_name";
-  public static final String VIEW_VERSION_PROPERTY_ID   = "ViewVersionInfo/version";
-  public static final String LABEL_PROPERTY_ID          = "ViewVersionInfo/label";
-  public static final String DESCRIPTION_PROPERTY_ID    = "ViewVersionInfo/description";
-  public static final String VERSION_PROPERTY_ID        = "ViewVersionInfo/version";
-  public static final String PARAMETERS_PROPERTY_ID     = "ViewVersionInfo/parameters";
-  public static final String ARCHIVE_PROPERTY_ID        = "ViewVersionInfo/archive";
-  public static final String MASKER_CLASS_PROPERTY_ID   = "ViewVersionInfo/masker_class";
+  public static final String VIEW_NAME_PROPERTY_ID          = "ViewVersionInfo/view_name";
+  public static final String VIEW_VERSION_PROPERTY_ID       = "ViewVersionInfo/version";
+  public static final String LABEL_PROPERTY_ID              = "ViewVersionInfo/label";
+  public static final String DESCRIPTION_PROPERTY_ID        = "ViewVersionInfo/description";
+  public static final String VERSION_PROPERTY_ID            = "ViewVersionInfo/version";
+  public static final String PARAMETERS_PROPERTY_ID         = "ViewVersionInfo/parameters";
+  public static final String ARCHIVE_PROPERTY_ID            = "ViewVersionInfo/archive";
+  public static final String MASKER_CLASS_PROPERTY_ID       = "ViewVersionInfo/masker_class";
+  public static final String VIEW_STATUS_PROPERTY_ID        = "ViewVersionInfo/status";
+  public static final String VIEW_STATUS_DETAIL_PROPERTY_ID = "ViewVersionInfo/status_detail";

   /**
    * The key property ids for a view resource.
@@ -75,6 +77,8 @@ public class ViewVersionResourceProvider extends AbstractResourceProvider {
     propertyIds.add(PARAMETERS_PROPERTY_ID);
     propertyIds.add(ARCHIVE_PROPERTY_ID);
     propertyIds.add(MASKER_CLASS_PROPERTY_ID);
+    propertyIds.add(VIEW_STATUS_PROPERTY_ID);
+    propertyIds.add(VIEW_STATUS_DETAIL_PROPERTY_ID);
   }


@@ -129,6 +133,8 @@
                 viewDefinition.getConfiguration().getParameters(), requestedIds);
             setResourceProperty(resource, ARCHIVE_PROPERTY_ID, viewDefinition.getArchive(), requestedIds);
             setResourceProperty(resource, MASKER_CLASS_PROPERTY_ID, viewDefinition.getMask(), requestedIds);
+            setResourceProperty(resource, VIEW_STATUS_PROPERTY_ID, viewDefinition.getStatus().toString(), requestedIds);
+            setResourceProperty(resource, VIEW_STATUS_DETAIL_PROPERTY_ID, viewDefinition.getStatusDetail(), requestedIds);

             resources.add(resource);
           }

+ 6 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProvider.java

@@ -79,10 +79,12 @@ public class NagiosPropertyProvider extends BaseProvider implements PropertyProv
   private static final String ALERT_SUMMARY_PASSIVE_PROPERTY_ID = "alerts/summary/PASSIVE";
   private static final String PASSIVE_TOKEN = "AMBARIPASSIVE=";
   
-  private static final List<String> IGNORABLE_FOR_SERVICES = new ArrayList<String>(
+  private static final List<String> DEFAULT_IGNORABLE_FOR_SERVICES = Collections.unmodifiableList(new ArrayList<String>(
       Arrays.asList("NodeManager health", "NodeManager process", "TaskTracker process",
       "RegionServer process", "DataNode process", "DataNode space",
-      "ZooKeeper Server process", "Supervisors process"));
+      "ZooKeeper Server process", "Supervisors process")));
+  
+  private static List<String> IGNORABLE_FOR_SERVICES;
   
   private static final List<String> IGNORABLE_FOR_HOSTS = new ArrayList<String>(
     Collections.singletonList("percent"));
@@ -99,6 +101,7 @@ public class NagiosPropertyProvider extends BaseProvider implements PropertyProv
   static {
     NAGIOS_PROPERTY_IDS.add("alerts/summary");
     NAGIOS_PROPERTY_IDS.add("alerts/detail");
+    IGNORABLE_FOR_SERVICES = new ArrayList<String>(DEFAULT_IGNORABLE_FOR_SERVICES);

     scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
       @Override
@@ -121,6 +124,7 @@ public class NagiosPropertyProvider extends BaseProvider implements PropertyProv
     clusters = injector.getInstance(Clusters.class);
     Configuration config = injector.getInstance(Configuration.class);
     
+    IGNORABLE_FOR_SERVICES = new ArrayList<String>(DEFAULT_IGNORABLE_FOR_SERVICES);
     String ignores = config.getProperty(Configuration.NAGIOS_IGNORE_FOR_SERVICES_KEY);
     if (null != ignores) {
       Collections.addAll(IGNORABLE_FOR_SERVICES, COMMA_PATTERN.split(ignores));

+ 49 - 6
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java

@@ -171,7 +171,7 @@ public class ViewEntity implements ViewDefinition {
    * The classloader used to load the view.
    */
   @Transient
-  private final ClassLoader classLoader;
+  private ClassLoader classLoader = null;

   /**
    * The mapping of resource type to resource provider.
@@ -203,6 +203,18 @@ public class ViewEntity implements ViewDefinition {
   @Transient
   private View view = null;

+  /**
+   * The view status.
+   */
+  @Transient
+  private ViewStatus status = ViewStatus.PENDING;
+
+  /**
+   * The view status detail.
+   */
+  @Transient
+  private String statusDetail;
+

   // ----- Constructors ------------------------------------------------------

@@ -212,7 +224,6 @@
   public ViewEntity() {
     this.configuration        = null;
     this.ambariConfiguration  = null;
-    this.classLoader          = null;
     this.archive              = null;
     this.externalResourceType = null;
   }
@@ -222,14 +233,12 @@ public class ViewEntity implements ViewDefinition {
    *
    * @param configuration        the view configuration
    * @param ambariConfiguration  the Ambari configuration
-   * @param classLoader          the class loader
    * @param archivePath          the path of the view archive
    */
   public ViewEntity(ViewConfig configuration, Configuration ambariConfiguration,
-                        ClassLoader classLoader, String archivePath) {
+                    String archivePath) {
     this.configuration       = configuration;
     this.ambariConfiguration = ambariConfiguration;
-    this.classLoader         = classLoader;
     this.archive             = archivePath;

     String version = configuration.getVersion();
@@ -270,6 +279,16 @@ public class ViewEntity implements ViewDefinition {
     return version;
   }

+  @Override
+  public ViewStatus getStatus() {
+    return status;
+  }
+
+  @Override
+  public String getStatusDetail() {
+    return statusDetail;
+  }
+

   // ----- ViewEntity --------------------------------------------------------

@@ -562,6 +581,15 @@
     return classLoader;
   }

+  /**
+   * Set the class loader.
+   *
+   * @param classLoader  the class loader
+   */
+  public void setClassLoader(ClassLoader classLoader) {
+    this.classLoader = classLoader;
+  }
+
   /**
    * Add a resource provider for the given type.
    *
@@ -703,8 +731,23 @@ public class ViewEntity implements ViewDefinition {
     this.resourceType = resourceType;
   }

+  /**
+   * Set the status of the view.
+   *
+   * @param status  the view status
+   */
+  public void setStatus(ViewStatus status) {
+    this.status = status;
+  }

-// ----- helper methods ----------------------------------------------------
+  /**
+   * Set the status detail for the view.
+   *
+   * @param statusDetail  the status detail
+   */
+  public void setStatusDetail(String statusDetail) {
+    this.statusDetail = statusDetail;
+  }

   /**
    * Get the internal view name from the given common name and version.
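Together with the getters added earlier, these setters let the registry report per-view deployment state through ViewVersionInfo/status and status_detail (see ViewVersionResourceProvider above). The readViewArchive task that drives the transitions is cut off at the end of this page, so the lifecycle below is an assumption; only ViewStatus.PENDING is visible in the diff:

// Assumed lifecycle of the new fields; the DEPLOYED/ERROR constants and the
// exact task body are guesses, since readViewArchive(...) is truncated below.
private void readViewArchive(ViewEntity viewDefinition, File archiveFile,
                             String archivePath, ViewConfig viewConfig) {
  try {
    ClassLoader cl = extractViewArchive(archiveFile, helper.getFile(archivePath));
    viewDefinition.setClassLoader(cl);  // possible now that classLoader is mutable
    // ... create instance definitions, syncView(...), addDefinition(...) ...
    viewDefinition.setStatus(ViewStatus.DEPLOYED);
  } catch (Exception e) {
    viewDefinition.setStatus(ViewStatus.ERROR);
    viewDefinition.setStatusDetail(e.getClass().getName() + ": " + e.getMessage());
  }
}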

+ 11 - 7
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java

@@ -17,7 +17,11 @@
  */
 package org.apache.ambari.server.security.authorization;

-import com.google.inject.Inject;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
 import org.apache.ambari.server.orm.dao.MemberDAO;
 import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
@@ -28,13 +32,11 @@ import org.apache.ambari.server.orm.entities.UserEntity;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.ldap.core.DirContextOperations;
+import org.springframework.security.authentication.DisabledException;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;

-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
+import com.google.inject.Inject;

 /**
  * Provides authorities population for LDAP user from LDAP catalog
@@ -63,12 +65,14 @@ public class AmbariLdapAuthoritiesPopulator implements LdapAuthoritiesPopulator
     UserEntity user;

     user = userDAO.findLdapUserByName(username);
-
+    
     if (user == null) {
       log.error("Can't get authorities for user " + username + ", he is not present in local DB");
       return Collections.emptyList();
     }
-
+    if(!user.getActive()){
+      throw new DisabledException("User is disabled");
+    }
     // get all of the privileges for the user
     List<PrincipalEntity> principalEntities = new LinkedList<PrincipalEntity>();


+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsService.java

@@ -91,7 +91,7 @@ public class AmbariLocalUserDetailsService implements UserDetailsService {

     List<PrivilegeEntity> privilegeEntities = privilegeDAO.findAllByPrincipal(principalEntities);

-    return new User(user.getUserName(), user.getUserPassword(),
-        authorizationHelper.convertPrivilegesToAuthorities(privilegeEntities));
+    return new User(user.getUserName(), user.getUserPassword(), user.getActive(), 
+        true, true, true, authorizationHelper.convertPrivilegesToAuthorities(privilegeEntities));
   }
 }
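For context, the extra booleans map onto Spring Security's seven-argument User constructor: username, password, enabled, accountNonExpired, credentialsNonExpired, accountNonLocked, authorities. Wiring user.getActive() into the enabled flag is what makes Spring's standard pre-authentication check reject deactivated accounts with a DisabledException. A small self-contained illustration (names are hypothetical):

import java.util.Collections;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.core.userdetails.UserDetails;

// Demonstrates the constructor used above: the third argument is "enabled".
public class DisabledUserExample {
  public static void main(String[] args) {
    boolean active = false; // stands in for user.getActive()
    UserDetails details = new User("alice", "secret", active,
        true, true, true, Collections.<GrantedAuthority>emptyList());
    // false here is what later surfaces as a DisabledException at login
    System.out.println("enabled = " + details.isEnabled());
  }
}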

+ 132 - 68
ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java

@@ -38,6 +38,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.jar.JarEntry;
 import java.util.jar.JarFile;

@@ -117,6 +121,11 @@ public class ViewRegistry {
   private static final String ARCHIVE_LIB_DIR = "WEB-INF/lib";
   private static final String EXTRACTED_ARCHIVES_DIR = "work";

+  /**
+   * Thread pool
+   */
+  private static ExecutorService executorService;
+
   /**
    * Mapping of view names to view definitions.
    */
@@ -262,10 +271,6 @@ public class ViewRegistry {
    * @param definition  the definition
    */
   public void addDefinition(ViewEntity definition) {
-    View view = definition.getView();
-    if (view != null) {
-      view.onDeploy(definition);
-    }
     viewDefinitions.put(definition.getName(), definition);
   }

@@ -391,80 +396,56 @@
   }

   /**
-   * Read the view archives.
-   *
-   * @param configuration  Ambari configuration
-   *
-   * @return the set of view instance definitions read from the archives
-   *
-   * @throws SystemException if the view archives can not be successfully read
+   * Asynchronously read the view archives.
    */
-  public Set<ViewInstanceEntity> readViewArchives(Configuration configuration)
-      throws SystemException {
-
-    try {
-      File viewDir = configuration.getViewsDir();
-
-      Set<ViewInstanceEntity> allInstanceDefinitions = new HashSet<ViewInstanceEntity>();
-
-      String extractedArchivesPath = viewDir.getAbsolutePath() +
-          File.separator + EXTRACTED_ARCHIVES_DIR;
+  public void readViewArchives() {

-      if (ensureExtractedArchiveDirectory(extractedArchivesPath)) {
-        File[] files = viewDir.listFiles();
+    final ExecutorService executorService = getExecutorService(configuration);
 
 
-        if (files != null) {
-          for (File archiveFile : files) {
-            if (!archiveFile.isDirectory()) {
-              try {
-                ViewConfig viewConfig = helper.getViewConfigFromArchive(archiveFile);
+    // submit a task to manage the extraction tasks
+    executorService.submit(new Runnable() {
+      @Override
+      public void run() {
 
 
-                String viewName    = ViewEntity.getViewName(viewConfig.getName(), viewConfig.getVersion());
-                String archivePath = extractedArchivesPath + File.separator + viewName;
+        try {
+          File viewDir = configuration.getViewsDir();
 
 
-                // extract the archive and get the class loader
-                ClassLoader cl = extractViewArchive(archiveFile, helper.getFile(archivePath));
+          String extractedArchivesPath = viewDir.getAbsolutePath() +
+              File.separator + EXTRACTED_ARCHIVES_DIR;
 
 
-                viewConfig = helper.getViewConfigFromExtractedArchive(archivePath);
+          if (ensureExtractedArchiveDirectory(extractedArchivesPath)) {
+            File[] files = viewDir.listFiles();
 
 
-                ViewEntity viewDefinition = createViewDefinition(viewConfig, configuration, cl, archivePath);
+            if (files != null) {
+              for (final File archiveFile : files) {
+                if (!archiveFile.isDirectory()) {
 
 
-                Set<ViewInstanceEntity> instanceDefinitions = new HashSet<ViewInstanceEntity>();
+                  final ViewConfig viewConfig = helper.getViewConfigFromArchive(archiveFile);
 
 
-                for (InstanceConfig instanceConfig : viewConfig.getInstances()) {
-                  try {
-                    ViewInstanceEntity instanceEntity = createViewInstanceDefinition(viewConfig, viewDefinition, instanceConfig);
-                    instanceEntity.setXmlDriven(true);
-                    instanceDefinitions.add(instanceEntity);
-                  } catch (Exception e) {
-                    LOG.error("Caught exception adding view instance for view " +
-                        viewDefinition.getViewName(), e);
-                  }
-                }
-                // ensure that the view entity matches the db
-                syncView(viewDefinition, instanceDefinitions);
+                  String commonName = viewConfig.getName();
+                  String version    = viewConfig.getVersion();
+                  String viewName   = ViewEntity.getViewName(commonName, version);
 
 
-                // update the registry with the view
-                addDefinition(viewDefinition);
+                  final String     archivePath    = extractedArchivesPath + File.separator + viewName;
+                  final ViewEntity viewDefinition = new ViewEntity(viewConfig, configuration, archivePath);
 
 
-                // update the registry with the view instances
-                for (ViewInstanceEntity instanceEntity : instanceDefinitions) {
-                  addInstanceDefinition(viewDefinition, instanceEntity);
+                  // submit a new task for each archive being read
+                  executorService.submit(new Runnable() {
+                    @Override
+                    public void run() {
+                      readViewArchive(viewDefinition, archiveFile, archivePath, viewConfig);
+                    }
+                  });
                 }
                 }
-
-                allInstanceDefinitions.addAll(instanceDefinitions);
-              } catch (Exception e) {
-                LOG.error("Caught exception loading view from " + archiveFile.getAbsolutePath(), e);
               }
               }
+              removeUndeployedViews();
             }
             }
           }
           }
-          removeUndeployedViews();
+        } catch (Exception e) {
+          LOG.error("Caught exception reading view archives.", e);
         }
         }
       }
       }
-      return allInstanceDefinitions;
-    } catch (Exception e) {
-      throw new SystemException("Caught exception reading view archives.", e);
-    }
+    });
   }
   }
 
 
   /**
   /**
@@ -786,12 +767,12 @@ public class ViewRegistry {
     return viewDefinitions.get(viewName);
   }
 
-  // create a new view definition
-  protected ViewEntity createViewDefinition(ViewConfig viewConfig, Configuration ambariConfig,
-                                          ClassLoader cl, String archivePath)
+  // setup the given view definition
+  protected ViewEntity setupViewDefinition(ViewEntity viewDefinition, ViewConfig viewConfig,
+                                           ClassLoader cl)
       throws ClassNotFoundException, IntrospectionException {
 
-    ViewEntity viewDefinition = new ViewEntity(viewConfig, ambariConfig, cl, archivePath);
+    viewDefinition.setClassLoader(cl);
 
     List<ParameterConfig> parameterConfigurations = viewConfig.getParameters();
 
@@ -1145,7 +1126,7 @@ public class ViewRegistry {
   }
 
   // extract the given view archive to the given archive directory
-  private ClassLoader extractViewArchive(File viewArchive, File archiveDir)
+  private ClassLoader extractViewArchive(ViewEntity viewDefinition, File viewArchive, File archiveDir)
       throws IOException {
 
     // Skip if the archive has already been extracted
@@ -1153,13 +1134,17 @@
 
       String archivePath = archiveDir.getAbsolutePath();
 
-      LOG.info("Creating archive folder " + archivePath + ".");
+      String msg = "Creating archive folder " + archivePath + ".";
+      LOG.info(msg);
+      setViewStatus(viewDefinition, ViewDefinition.ViewStatus.LOADING, msg);
 
       if (archiveDir.mkdir()) {
         JarFile     viewJarFile = helper.getJarFile(viewArchive);
         Enumeration enumeration = viewJarFile.entries();
 
-        LOG.info("Extracting files from " + viewArchive.getName() + ":");
+        msg = "Extracting files from " + viewArchive.getName() + ":";
+        LOG.info(msg);
+        setViewStatus(viewDefinition, ViewDefinition.ViewStatus.LOADING, msg);
 
         while (enumeration.hasMoreElements()) {
           JarEntry jarEntry  = (JarEntry) enumeration.nextElement();
@@ -1268,6 +1253,85 @@ public class ViewRegistry {
     return false;
   }
 
+  // fire the onDeploy event.
+  protected void onDeploy(ViewEntity definition) {
+    View view = definition.getView();
+    if (view != null) {
+      view.onDeploy(definition);
+    }
+  }
+
+  // read a view archive and return the set of new view instances
+  private void readViewArchive(ViewEntity viewDefinition,
+                                                  File archiveFile,
+                                                  String archivePath,
+                                                  ViewConfig viewConfig) {
+
+    setViewStatus(viewDefinition, ViewEntity.ViewStatus.LOADING, "Loading " + archivePath + ".");
+
+    try {
+      // update the registry with the view
+      addDefinition(viewDefinition);
+
+      // extract the archive and get the class loader
+      ClassLoader cl = extractViewArchive(viewDefinition, archiveFile, helper.getFile(archivePath));
+
+      viewConfig = helper.getViewConfigFromExtractedArchive(archivePath);
+
+      setupViewDefinition(viewDefinition, viewConfig, cl);
+
+      Set<ViewInstanceEntity> instanceDefinitions = new HashSet<ViewInstanceEntity>();
+
+      for (InstanceConfig instanceConfig : viewConfig.getInstances()) {
+        ViewInstanceEntity instanceEntity = createViewInstanceDefinition(viewConfig, viewDefinition, instanceConfig);
+        instanceEntity.setXmlDriven(true);
+        instanceDefinitions.add(instanceEntity);
+      }
+      // ensure that the view entity matches the db
+      syncView(viewDefinition, instanceDefinitions);
+
+      onDeploy(viewDefinition);
+
+      // update the registry with the view instances
+      for (ViewInstanceEntity instanceEntity : instanceDefinitions) {
+        addInstanceDefinition(viewDefinition, instanceEntity);
+        handlerList.addViewInstance(instanceEntity);
+      }
+      setViewStatus(viewDefinition, ViewEntity.ViewStatus.LOADED, "Loaded " + archivePath + ".");
+
+    } catch (Exception e) {
+      String msg = "Caught exception loading view " + viewDefinition.getViewName() + " : " + e.getMessage();
+
+      setViewStatus(viewDefinition, ViewEntity.ViewStatus.ERROR, msg);
+      LOG.error(msg, e);
+    }
+  }
+
+  // set the status of the given view.
+  private void setViewStatus(ViewEntity viewDefinition, ViewEntity.ViewStatus status, String statusDetail) {
+    viewDefinition.setStatus(status);
+    viewDefinition.setStatusDetail(statusDetail);
+  }
+
+  // Get the view extraction thread pool
+  private static synchronized ExecutorService getExecutorService(Configuration configuration) {
+    if (executorService == null) {
+      LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
+
+      ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(
+          configuration.getViewExtractionThreadPoolCoreSize(),
+          configuration.getViewExtractionThreadPoolMaxSize(),
+          configuration.getViewExtractionThreadPoolTimeout(),
+          TimeUnit.MILLISECONDS,
+          queue);
+
+      threadPoolExecutor.allowCoreThreadTimeOut(true);
+      executorService = threadPoolExecutor;
+    }
+    return executorService;
+  }
+
+
 
 
   // ----- inner class : ViewRegistryHelper ----------------------------------
 

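The ViewRegistry change above moves archive extraction onto a lazily created thread pool: one coordinator task scans the views directory, a worker task per archive extracts it and drives the view through the LOADING / LOADED / ERROR status transitions, and getExecutorService() builds the pool on first use. A self-contained sketch of that pool pattern, with assumed sizes in place of the Configuration-driven values:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExtractionPoolSketch {
      private static ExecutorService executorService;

      // lazily create the shared extraction pool on first use
      private static synchronized ExecutorService getExecutorService() {
        if (executorService == null) {
          ThreadPoolExecutor pool = new ThreadPoolExecutor(
              2,                              // core pool size (assumed value)
              5,                              // max pool size (assumed value)
              10000L, TimeUnit.MILLISECONDS,  // idle timeout (assumed value)
              new LinkedBlockingQueue<Runnable>());
          // let even core threads exit after the timeout once the queue drains
          pool.allowCoreThreadTimeOut(true);
          executorService = pool;
        }
        return executorService;
      }
    }

One property of this pattern worth noting: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never grows beyond its core size (excess tasks queue rather than spawning threads up to the maximum), so the core-size setting is what effectively bounds extraction parallelism, and allowCoreThreadTimeOut(true) ensures no idle threads linger after all archives are processed.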
+ 7 - 0
ambari-server/src/main/package/rpm/posttrans_server.sh

@@ -16,6 +16,8 @@
 
 RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
 RESOURCE_MANAGEMENT_DIR_SERVER="/usr/lib/ambari-server/lib/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
+JINJA_SERVER_DIR="/usr/lib/ambari-server/lib/ambari_jinja2"
 
 # remove RESOURCE_MANAGEMENT_DIR if it's a directory
 if [ -d "$RESOURCE_MANAGEMENT_DIR" ]; then  # resource_management dir exists
@@ -28,4 +30,9 @@ if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
   ln -s "$RESOURCE_MANAGEMENT_DIR_SERVER" "$RESOURCE_MANAGEMENT_DIR"
 fi
 
+# setting jinja2 shared resource
+if [ ! -d "$JINJA_DIR" ]; then
+  ln -s "$JINJA_SERVER_DIR" "$JINJA_DIR"
+fi
+
 exit 0

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/configuration/cluster-env.xml

@@ -42,6 +42,11 @@
         <property-type>USER</property-type>
         <description>User executing service checks</description>
     </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+    </property>
     <property>
         <name>user_group</name>
         <value>hadoop</value>

+ 2 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py

@@ -24,8 +24,7 @@ import os
 config = Script.get_config()
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params
 java_home = config['hostLevelParams']['java_home']
 #hadoop params
@@ -60,4 +59,4 @@ hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 
 #users and groups
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py

@@ -22,8 +22,7 @@ from resource_management import *
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user

+ 5 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py

@@ -38,8 +38,7 @@ if System.get_instance().os_family == "suse":
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
 
@@ -72,12 +71,12 @@ hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_p
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
 nagios_user = config['configurations']['nagios-env']['nagios_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
-user_group = config['configurations']['hadoop-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group =  default("/configurations/hadoop-env/proxyuser_group","users")
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hosts
@@ -119,7 +118,7 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsusers_create", False)
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:

+ 11 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py

@@ -24,13 +24,12 @@ import os
 config = Script.get_config()
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 #users and groups
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 
 #hosts
 hostname = config["hostname"]
@@ -48,6 +47,7 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 
+has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
 has_nagios = not len(hagios_server_hosts) == 0
@@ -67,7 +67,8 @@ is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 #hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+if has_namenode:
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
 hadoop_lib_home = "/usr/lib/hadoop/lib"
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
@@ -93,7 +94,7 @@ ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
 ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
 ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
 
-if 'rca_enabled' in config['configurations']['mapred-env']:
+if has_namenode and 'rca_enabled' in config['configurations']['mapred-env']:
   rca_enabled =  config['configurations']['mapred-env']['rca_enabled']
 else:
   rca_enabled = False
@@ -134,6 +135,11 @@ dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
 
 #log4j.properties
 rca_properties = format('''
+ambari.jobhistory.database={ambari_db_rca_url}
+ambari.jobhistory.driver={ambari_db_rca_driver}
+ambari.jobhistory.user={ambari_db_rca_username}
+ambari.jobhistory.password={ambari_db_rca_password}
+ambari.jobhistory.logger=${{hadoop.root.logger}}
 
 log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
 log4j.appender.JHA.database={ambari_db_rca_url}

+ 62 - 59
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py

@@ -33,55 +33,57 @@ def setup_hadoop():
 
   install_snappy()
 
-  #directories
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  #files
-  if params.security_enabled:
-    tc_owner = "root"
-  else:
-    tc_owner = params.hdfs_user
 
-    File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-         owner=tc_owner,
-         content=Template("commons-logging.properties.j2")
+  if params.has_namenode:
+    #directories
+    Directory(params.hdfs_log_dir_prefix,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
+    Directory(params.hadoop_pid_dir_prefix,
+              recursive=True,
+              owner='root',
+              group='root'
     )
 
-  health_check_template = "health_check" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
+    #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
 
-  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-  if (params.log4j_props != None):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
-         owner=params.hdfs_user,
-         content=params.log4j_props
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template("commons-logging.properties.j2")
+      )
+
+    health_check_template = "health_check" #for stack 1 use 'health_check'
+    File(os.path.join(params.hadoop_conf_dir, "health_check"),
+         owner=tc_owner,
+         content=Template(health_check_template + ".j2")
    )
-  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
+
+    log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+    if (params.log4j_props != None):
+      File(log4j_filename,
+           mode=0644,
+           group=params.user_group,
+           owner=params.hdfs_user,
+           content=params.log4j_props
+      )
+    elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+      File(log4j_filename,
+           mode=0644,
+           group=params.user_group,
+           owner=params.hdfs_user,
+      )
+
+    File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
          owner=params.hdfs_user,
+         content=Template("hadoop-metrics2.properties.j2")
     )
 
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
 def setup_database():
   """
   """
   Load DB
   Load DB
@@ -113,33 +115,34 @@ def setup_configs():
   """
   """
   import params
   import params
 
 
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
-       to = '/usr/lib/hadoop/hadoop-tools.jar'
-  )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
+  if params.has_namenode:
+    File(params.task_log4j_properties_location,
+         content=StaticFile("task-log4j.properties"),
+         mode=0755
     )
 
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-         owner=params.hdfs_user,
-         group=params.user_group
+    Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
+         to = '/usr/lib/hadoop/hadoop-tools.jar'
     )
 
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+
   # generate_include_file()
 
 def generate_include_file():
   import params
 
-  if params.dfs_hosts and params.has_slaves:
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
     include_hosts_list = params.slave_hosts
     File(params.dfs_hosts,
          content=Template("include_hosts_list.j2"),

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py

@@ -22,7 +22,7 @@ import os
 
 config = Script.get_config()
 
-user_group = config['configurations']['hadoop-env']["user_group"]
+user_group = config['configurations']['cluster-env']["user_group"]
 ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir","/etc/ganglia/hdp")
 ganglia_dir = "/etc/ganglia"
 ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]

+ 5 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py

@@ -36,10 +36,9 @@ hbase_drain_only = config['commandParams']['mark_draining_only']
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['hadoop-env']['user_group']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+user_group = config['configurations']['cluster-env']['user_group']
 
 # this is "hadoop-metrics2-hbase.properties" for 2.x stacks
 metric_prop_file_name = "hadoop-metrics.properties"
@@ -68,7 +67,7 @@ ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_h
 
 rs_hosts = config['clusterHostInfo']['slave_hosts'] #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
 
-smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
 
@@ -79,7 +78,7 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:

+ 1 - 31
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml

@@ -61,48 +61,18 @@
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
   </property>
+  <property>
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
     <property-type>GROUP</property-type>
     <description>Proxy user group.</description>
   </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-
-  <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  <property>
-    <name>smokeuser</name>
-    <value>ambari-qa</value>
-    <property-type>USER</property-type>
-    <description>User executing service checks</description>
-  </property>
-  <property>
-    <name>user_group</name>
-    <value>hadoop</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-  </property>
-  
   <!-- hadoop-env.sh -->
   <property>
     <name>content</name>

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -27,9 +27,8 @@ tmp_dir = Script.get_tmp_dir()
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 
 #exclude file
@@ -87,11 +86,11 @@ oozie_user = config['configurations']['oozie-env']['oozie_user']
 webhcat_user = config['configurations']['hive-env']['hcat_user']
 hcat_user = config['configurations']['hive-env']['hcat_user']
 hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
 mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 

+ 24 - 7
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py

@@ -40,24 +40,35 @@ def hive(name=None):
     )
     params.HdfsDirectory(None, action="create")
   if name == 'metastore' or name == 'hiveserver2':
-    config_file_mode = 0600
     jdbc_connector()
-  else:
-    config_file_mode = 0644
 
-  Directory(params.hive_config_dir,
+  Directory(params.hive_conf_dir,
             owner=params.hive_user,
             group=params.user_group,
             recursive=True
   )
+  Directory(params.hive_server_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644
+  )
 
   XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
+            conf_dir=params.hive_server_conf_dir,
             configurations=params.config['configurations']['hive-site'],
             configuration_attributes=params.config['configuration_attributes']['hive-site'],
             owner=params.hive_user,
             group=params.user_group,
-            mode=config_file_mode
+            mode=0600
   )
 
   environment = {
@@ -90,7 +101,13 @@ def hive(name=None):
     crt_directory(params.hive_log_dir)
     crt_directory(params.hive_var_lib)
 
-  File(format("{hive_config_dir}/hive-env.sh"),
+  File(format("{hive_conf_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
+  File(format("{hive_server_conf_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
        content=InlineTemplate(params.hive_env_sh_template)

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py

@@ -57,13 +57,12 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
 smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
@@ -94,7 +93,7 @@ java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
 hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 
 target = format("{hive_lib}/{jdbc_jar_name}")

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py

@@ -36,17 +36,16 @@ tasktracker_pid_file = status_params.tasktracker_pid_file
 
 hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
 hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 mapred_log_dir_prefix = hdfs_log_dir_prefix
 mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
 hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #exclude file

+ 2 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py

@@ -79,8 +79,7 @@ clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK
 
 java64_home = config['hostLevelParams']['java_home']
 check_cpu_on = is_jdk_greater_6(java64_home)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
@@ -109,7 +108,7 @@ nagios_user = config['configurations']['nagios-env']['nagios_user']
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
 nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
 namenode_host = default("/clusterHostInfo/namenode_host", None)

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py

@@ -27,10 +27,10 @@ tmp_dir = Script.get_tmp_dir()
 
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 oozie_user = config['configurations']['oozie-env']['oozie_user']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -44,13 +44,12 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 ext_js_path = "/usr/share/HDP-oozie/ext.zip"
 oozie_libext_dir = "/usr/lib/oozie/libext"
 lzo_enabled = config['configurations']['mapred-env']['lzo_enabled']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
 
 oracle_driver_jar_name = "ojdbc6.jar"

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py

@@ -28,11 +28,10 @@ tmp_dir = Script.get_tmp_dir()
 pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-user_group = config['configurations']['hadoop-env']['user_group']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 pig_env_sh_template = config['configurations']['pig-env']['content']
 

+ 5 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py

@@ -20,10 +20,9 @@ from resource_management import *
 
 config = Script.get_config()
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-user_group = config['configurations']['hadoop-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
 sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
 
 sqoop_conf_dir = "/usr/lib/sqoop/conf"
@@ -33,6 +32,6 @@ zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
-keytab_path = config['configurations']['hadoop-env']['keytab_path']
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+keytab_path = config['configurations']['cluster-env']['keytab_path']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py

@@ -41,15 +41,14 @@ hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.con
 templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
 
 hadoop_home = '/usr'
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 
 webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
 
 webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #hdfs directories

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py

@@ -30,7 +30,7 @@ config_dir = "/etc/zookeeper/conf"
 zk_user =  config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
 zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 zk_env_sh_template = config['configurations']['zookeeper-env']['content']
 
 smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
@@ -63,11 +63,10 @@ zookeeper_hosts.sort()
 zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
 zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
 zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #log4j.properties

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml

@@ -42,6 +42,11 @@
         <property-type>USER</property-type>
         <description>User executing service checks</description>
     </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+    </property>
     <property>
         <name>user_group</name>
         <value>hadoop</value>

+ 5 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -24,8 +24,7 @@ import os
 config = Script.get_config()
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params
 java_home = config['hostLevelParams']['java_home']
 #hadoop params
@@ -63,4 +62,7 @@ mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefi
 
 #users and groups
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0

+ 25 - 23
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -21,30 +21,32 @@ from resource_management import *
 
 def setup_hadoop_env():
   import params
-  if params.security_enabled:
-    tc_owner = "root"
-  else:
-    tc_owner = params.hdfs_user
-  Directory(params.hadoop_conf_empty_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Link(params.hadoop_conf_dir,
-       to=params.hadoop_conf_empty_dir,
-       not_if=format("ls {hadoop_conf_dir}")
-  )
-  File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-       owner=tc_owner,
-       content=InlineTemplate(params.hadoop_env_sh_template)
-  )
+  if params.has_namenode:
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+    Directory(params.hadoop_conf_empty_dir,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
+    Link(params.hadoop_conf_dir,
+         to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}")
+    )
+    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
+         owner=tc_owner,
+         content=InlineTemplate(params.hadoop_env_sh_template)
+    )
 
 def setup_config():
   import params
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
+  if params.has_namenode:
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group
   )

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -22,8 +22,7 @@ from resource_management import *
 config = Script.get_config()
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 tmp_dir = Script.get_tmp_dir()
 
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user

+ 5 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py

@@ -29,13 +29,13 @@ tmp_dir = Script.get_tmp_dir()
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
 nagios_user = config['configurations']['nagios-env']['nagios_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 tez_user = config['configurations']['tez-env']["tez_user"]
 
-user_group = config['configurations']['hadoop-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hosts
@@ -84,8 +84,7 @@ if has_ganglia_server:
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 #java params
 java_home = config['hostLevelParams']['java_home']
@@ -94,7 +93,7 @@ jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
 jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsusers_create", False)
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:

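proxyuser_group now falls back to "users" via default() instead of failing when hadoop-env does not define it. The helper walks a '/'-separated path through the config structure; a hedged stand-in (not Ambari's actual implementation) could look like:

    def default(config, path, fallback):
        # Walk a '/'-separated path through nested dicts, returning the
        # fallback as soon as any key along the way is missing.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {'configurations': {'hadoop-env': {}}}
    print(default(config, '/configurations/hadoop-env/proxyuser_group', 'users'))  # -> users

(The real helper is a module-level function that reads the global config, so it takes only the path and the fallback.)
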
+ 7 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -24,15 +24,14 @@ import os
 config = Script.get_config()
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 #users and groups
 mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 yarn_user = config['configurations']['yarn-env']['yarn_user']
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 
 #hosts
 hostname = config["hostname"]
@@ -50,6 +49,7 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 
+has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
 has_nagios = not len(hagios_server_hosts) == 0
@@ -69,7 +69,9 @@ is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 #hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+
+if has_namenode:
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
 hadoop_lib_home = "/usr/lib/hadoop/lib"
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
@@ -94,7 +96,7 @@ ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
 ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
 ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
 
-if 'rca_enabled' in config['configurations']['hadoop-env']:
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
   rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
 else:
   rca_enabled = False

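The new has_namenode flag follows the same convention as the existing has_* params: a component is "present" exactly when its clusterHostInfo host list is non-empty. Illustrative sketch:

    # clusterHostInfo data is illustrative.
    cluster_host_info = {
        'namenode_host': ['nn1.example.com'],
        'rm_host': [],
    }

    namenode_host = cluster_host_info.get('namenode_host', [])
    rm_host = cluster_host_info.get('rm_host', [])

    has_namenode = len(namenode_host) > 0   # True
    has_resourcemanager = len(rm_host) > 0  # False
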
+ 63 - 61
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py

@@ -34,58 +34,59 @@ def setup_hadoop():
   install_snappy()
 
   #directories
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
+  if params.has_namenode:
+    Directory(params.hdfs_log_dir_prefix,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
+    Directory(params.hadoop_pid_dir_prefix,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
   #this doesn't needed with stack 1
-  Directory(params.hadoop_tmp_dir,
-            recursive=True,
-            owner=params.hdfs_user,
-            )
+    Directory(params.hadoop_tmp_dir,
+              recursive=True,
+              owner=params.hdfs_user,
+              )
   #files
-  if params.security_enabled:
-    tc_owner = "root"
-  else:
-    tc_owner = params.hdfs_user
-
-  File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-       owner=tc_owner,
-       content=Template('commons-logging.properties.j2')
-  )
-
-  health_check_template = "health_check-v2" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+         owner=tc_owner,
+         content=Template('commons-logging.properties.j2')
+    )
 
-  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-  if (params.log4j_props != None):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
-         owner=params.hdfs_user,
-         content=params.log4j_props
+    health_check_template = "health_check-v2" #for stack 1 use 'health_check'
+    File(os.path.join(params.hadoop_conf_dir, "health_check"),
+         owner=tc_owner,
+         content=Template(health_check_template + ".j2")
     )
-  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
+
+    log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+    if (params.log4j_props != None):
+      File(log4j_filename,
+           mode=0644,
+           group=params.user_group,
+           owner=params.hdfs_user,
+           content=params.log4j_props
+      )
+    elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+      File(log4j_filename,
+           mode=0644,
+           group=params.user_group,
+           owner=params.hdfs_user,
+      )
+
+    File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
          owner=params.hdfs_user,
+         content=Template("hadoop-metrics2.properties.j2")
     )
 
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
 def setup_database():
   """
   Load DB
@@ -113,33 +114,34 @@ def setup_database():
 
 def setup_configs():
   """
-  Creates configs for services DHFS mapred
+  Creates configs for services HDFS mapred
   """
   """
   import params
   import params
 
 
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-              owner=params.hdfs_user,
-              group=params.user_group
+  if params.has_namenode:
+    File(params.task_log4j_properties_location,
+         content=StaticFile("task-log4j.properties"),
+         mode=0755
     )
 
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
   generate_include_file()
 
 
 def generate_include_file():
   import params
 
-  if params.dfs_hosts and params.has_slaves:
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
     include_hosts_list = params.slave_hosts
     File(params.dfs_hosts,
          content=Template("include_hosts_list.j2"),

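Inside the new has_namenode block the log4j.properties handling keeps its two-branch shape: write the stack-supplied properties when they exist, otherwise only fix up an already-present file. A plain-Python sketch of that branch, with os-level calls standing in for the File() resource:

    import os

    def write_log4j(conf_dir, log4j_props):
        path = os.path.join(conf_dir, 'log4j.properties')
        if log4j_props is not None:
            # Stack supplied content: write it and set the expected mode.
            with open(path, 'w') as f:
                f.write(log4j_props)
            os.chmod(path, 0o644)
        elif os.path.exists(path):
            # No content supplied: keep the existing file, just fix the mode.
            os.chmod(path, 0o644)
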
+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py

@@ -21,7 +21,7 @@ from resource_management import *
 
 config = Script.get_config()
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 security_enabled = False

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py

@@ -22,7 +22,7 @@ import os
 
 config = Script.get_config()
 
-user_group = config['configurations']['hadoop-env']["user_group"]
+user_group = config['configurations']['cluster-env']["user_group"]
 ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir", "/etc/ganglia/hdp")
 ganglia_dir = "/etc/ganglia"
 ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]

+ 5 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py

@@ -36,10 +36,9 @@ hbase_drain_only = config['commandParams']['mark_draining_only']
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['hadoop-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 # this is "hadoop-metrics.properties" for 1.x stacks
 metric_prop_file_name = "hadoop-metrics2-hbase.properties"
@@ -74,9 +73,10 @@ if 'slave_hosts' in config['clusterHostInfo']:
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
 
-smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
 
 if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
@@ -85,7 +85,7 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:

+ 0 - 27
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml

@@ -67,39 +67,12 @@
     <property-type>GROUP</property-type>
     <description>Proxy user group.</description>
   </property>
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
   <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  <property>
-    <name>smokeuser</name>
-    <value>ambari-qa</value>
-    <property-type>USER</property-type>
-    <description>User executing service checks</description>
-  </property>
-  <property>
-    <name>user_group</name>
-    <value>hadoop</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-  </property>
   
   <!-- hadoop-env.sh -->
   <property>

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -27,9 +27,8 @@ tmp_dir = Script.get_tmp_dir()
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
 
@@ -91,12 +90,12 @@ oozie_user = config['configurations']['oozie-env']['oozie_user']
 webhcat_user = config['configurations']['hive-env']['hcat_user']
 hcat_user = config['configurations']['hive-env']['hcat_user']
 hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
 mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 

+ 33 - 9
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py

@@ -40,32 +40,50 @@ def hive(name=None):
     )
     params.HdfsDirectory(None, action="create")
   if name == 'metastore' or name == 'hiveserver2':
-    config_file_mode = 0600
     jdbc_connector()
-  else:
-    config_file_mode = 0644
 
-  Directory(params.hive_config_dir,
+  Directory(params.hive_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+  Directory(params.hive_server_conf_dir,
             owner=params.hive_user,
             group=params.user_group,
             recursive=True
   )
 
   XmlConfig("mapred-site.xml",
-            conf_dir=params.hive_config_dir,
+            conf_dir=params.hive_conf_dir,
             configurations=params.config['configurations']['mapred-site'],
             configuration_attributes=params.config['configuration_attributes']['mapred-site'],
             owner=params.hive_user,
             group=params.user_group,
-            mode=config_file_mode)
+            mode=0644)
 
   XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
+            conf_dir=params.hive_conf_dir,
             configurations=params.config['configurations']['hive-site'],
             configuration_attributes=params.config['configuration_attributes']['hive-site'],
             owner=params.hive_user,
             group=params.user_group,
-            mode=config_file_mode)
+            mode=0644)
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.hive_server_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0600)
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_server_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0600)
 
   environment = {
     "no_proxy": format("{ambari_server_hostname}")
@@ -80,7 +98,13 @@ def hive(name=None):
           not_if=format("[ -f {check_db_connection_jar_name}]"),
           environment = environment)
 
-  File(format("{hive_config_dir}/hive-env.sh"),
+  File(format("{hive_conf_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
+  File(format("{hive_server_conf_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
        content=InlineTemplate(params.hive_env_sh_template)

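The single hive_config_dir with a mode that depended on the component is replaced by two directories written unconditionally: a readable client config and a 0600 server config (hive-site.xml can carry metastore DB credentials). A sketch of the resulting layout, with illustrative paths:

    conf_targets = [
        ('/etc/hive/conf', 0o644),         # client configs, world-readable
        ('/etc/hive/conf.server', 0o600),  # server configs, owner-only
    ]
    for conf_dir, mode in conf_targets:
        for xml_name in ('mapred-site.xml', 'hive-site.xml'):
            print('would write %s/%s with mode %o' % (conf_dir, xml_name, mode))
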
+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py

@@ -61,14 +61,13 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
 smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
 fs_root = config['configurations']['core-site']['fs.defaultFS']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
@@ -99,7 +98,7 @@ java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
 hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 
 target = format("{hive_lib}/{jdbc_jar_name}")

+ 32 - 30
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py

@@ -88,21 +88,31 @@ nagios_principal_name = default("/configurations/nagios-env/nagios_principal_nam
 hadoop_ssl_enabled = False
 
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+namenode_host = default("/clusterHostInfo/namenode_host", None)
 
-# different to HDP1    
-if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
-  namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+# - test for HDFS or HCFS (glusterfs)
+if 'namenode_host' in config['clusterHostInfo']:
+  ishdfs_value = "HDFS"
 else:
-  namenode_port = "50070" 
+  ishdfs_value = None
 
-if 'dfs.namenode.secondary.http-address' in config['configurations']['hdfs-site']:
-  snamenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.secondary.http-address'])
-else:
-  snamenode_port = "50071"
+has_namenode = not namenode_host == None
 
-if 'dfs.journalnode.http-address' in config['configurations']['hdfs-site']:
-  journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-  datanode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.datanode.http.address'])
+# different to HDP1
+if has_namenode:
+  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
+    namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+  else:
+    namenode_port = "50070"
+
+  if 'dfs.namenode.secondary.http-address' in config['configurations']['hdfs-site']:
+    snamenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.secondary.http-address'])
+  else:
+    snamenode_port = "50071"
+
+  if 'dfs.journalnode.http-address' in config['configurations']['hdfs-site']:
+    journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
+    datanode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.datanode.http.address'])
 
 hbase_master_rpc_port = default('/configurations/hbase-site/hbase.master.port', "60000")
 rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
@@ -124,15 +134,16 @@ ahs_port = get_port_from_url(config['configurations']['yarn-site']['yarn.timelin
 
 # use sensible defaults for checkpoint as they are required by Nagios and 
 # may not be part of hdfs-site.xml on an upgrade
-if 'dfs.namenode.checkpoint.period' in config['configurations']['hdfs-site']:
-  dfs_namenode_checkpoint_period = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.period']
-else:
-  dfs_namenode_checkpoint_period = '21600'
+if has_namenode:
+  if 'dfs.namenode.checkpoint.period' in config['configurations']['hdfs-site']:
+    dfs_namenode_checkpoint_period = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.period']
+  else:
+    dfs_namenode_checkpoint_period = '21600'
   
-if 'dfs.namenode.checkpoint.txns' in config['configurations']['hdfs-site']:
-  dfs_namenode_checkpoint_txns = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.txns']
-else:
-  dfs_namenode_checkpoint_txns = '1000000'
+  if 'dfs.namenode.checkpoint.txns' in config['configurations']['hdfs-site']:
+    dfs_namenode_checkpoint_txns = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.txns']
+  else:
+    dfs_namenode_checkpoint_txns = '1000000'
 
 # this is different for HDP1
 nn_metrics_property = "FSNamesystem"
@@ -141,9 +152,7 @@ clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK
 
 java64_home = config['hostLevelParams']['java_home']
 check_cpu_on = is_jdk_greater_6(java64_home)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
@@ -204,16 +213,9 @@ nagios_user = config['configurations']['nagios-env']['nagios_user']
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
 nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
-# - test for HDFS or HCFS (glusterfs)
-if 'namenode_host' in config['clusterHostInfo']:
-  namenode_host = default("/clusterHostInfo/namenode_host", None)
-  ishdfs_value = "HDFS"
-else:
-  namenode_host = None
-  ishdfs_value = None 
 
 _snamenode_host = default("/clusterHostInfo/snamenode_host", None)
 _jtnode_host = default("/clusterHostInfo/jtnode_host", None)

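All of the Nagios port lookups now sit behind has_namenode, each with a fixed fallback for upgraded clusters whose hdfs-site.xml lacks the address keys. Ambari's get_port_from_url is not reproduced here; an illustrative stand-in for the extraction-plus-fallback pattern:

    try:
        from urlparse import urlparse       # Python 2, as these scripts use
    except ImportError:
        from urllib.parse import urlparse   # Python 3

    def port_from_address(address, fallback):
        # Parse "host:port" or a full URL; fall back when no port is present.
        parsed = urlparse(address if '//' in address else '//' + address)
        return str(parsed.port) if parsed.port else fallback

    hdfs_site = {}  # e.g. an upgraded cluster without dfs.namenode.http-address
    namenode_port = port_from_address(hdfs_site.get('dfs.namenode.http-address', ''), '50070')
    print(namenode_port)  # -> 50070
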
+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py

@@ -26,10 +26,10 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 oozie_user = config['configurations']['oozie-env']['oozie_user']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -42,13 +42,12 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 # for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
 ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
 oozie_libext_dir = "/usr/lib/oozie/libext"
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
 oozie_env_sh_template = config['configurations']['oozie-env']['content']
 

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py

@@ -29,11 +29,10 @@ pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-user_group = config['configurations']['hadoop-env']['user_group']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 pig_env_sh_template = config['configurations']['pig-env']['content']
 

+ 4 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py

@@ -21,10 +21,9 @@ from resource_management import *
 
 config = Script.get_config()
 
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-user_group = config['configurations']['hadoop-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
 sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
 
 sqoop_conf_dir = "/usr/lib/sqoop/conf"
@@ -34,6 +33,5 @@ zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
-keytab_path = config['configurations']['hadoop-env']['keytab_path']
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py

@@ -47,15 +47,14 @@ hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.con
 templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
 
 hadoop_home = '/usr'
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 
 webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
 
 webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 hcat_hdfs_user_dir = format("/user/{hcat_user}")

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-site.xml

@@ -347,7 +347,7 @@
 
   <property>
     <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
     <description>
       Additional execution environment entries for map and reduce task processes.
       This is not an additive property. You must preserve the original value if

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/application_timeline_server.py

@@ -39,12 +39,12 @@ class ApplicationTimelineServer(Script):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY
-    service('historyserver', action='start')
+    service('timelineserver', action='start')
 
   def stop(self, env):
     import params
     env.set_params(params)
-    service('historyserver', action='stop')
+    service('timelineserver', action='stop')
 
   def status(self, env):
     import status_params

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py

@@ -24,7 +24,7 @@ from resource_management import *
 from yarn import yarn
 from service import service
 
-class Histroryserver(Script):
+class HistoryServer(Script):
   def install(self, env):
     self.install_packages(env)
 
@@ -50,4 +50,4 @@ class Histroryserver(Script):
     check_process_status(status_params.mapred_historyserver_pid_file)
 
 if __name__ == "__main__":
-  Histroryserver().execute()
+  HistoryServer().execute()

+ 13 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py

@@ -28,14 +28,15 @@ tmp_dir = Script.get_tmp_dir()
 
 config_dir = "/etc/hadoop/conf"
 
+ulimit_cmd = "ulimit -c unlimited;"
+
 mapred_user = status_params.mapred_user
 yarn_user = status_params.yarn_user
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 rm_hosts = config['clusterHostInfo']['rm_host']
@@ -92,7 +93,7 @@ yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduc
 mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
 yarn_bin = "/usr/lib/hadoop-yarn/sbin"
 
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 limits_conf_dir = "/etc/security/limits.d"
 hadoop_conf_dir = "/etc/hadoop/conf"
 yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
@@ -109,8 +110,15 @@ if security_enabled:
   _rm_principal_name = _rm_principal_name.replace('_HOST',hostname.lower())
   
   rm_kinit_cmd = format("{kinit_path_local} -kt {_rm_keytab} {_rm_principal_name};")
+
+  # YARN timeline security options are only available in HDP Champlain
+  _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+  _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+  _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+  yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
 else:
   rm_kinit_cmd = ""
+  yarn_timelineservice_kinit_cmd = ""
 
 yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
 yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']

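The timeline-service kinit command is built the same way as the existing ResourceManager one: take the principal from yarn-site, substitute the _HOST placeholder with the local lowercased hostname, and pair it with the keytab. Sketch with illustrative values:

    hostname = 'NM1.Example.COM'                          # illustrative
    principal_template = 'yarn/_HOST@EXAMPLE.COM'         # illustrative
    keytab = '/etc/security/keytabs/yarn.service.keytab'  # illustrative
    kinit_path_local = '/usr/bin/kinit'

    # Each host replaces _HOST with its own lowercased FQDN.
    principal = principal_template.replace('_HOST', hostname.lower())
    kinit_cmd = '%s -kt %s %s;' % (kinit_path_local, keytab, principal)
    print(kinit_cmd)  # /usr/bin/kinit -kt ... yarn/nm1.example.com@EXAMPLE.COM;
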
+ 18 - 12
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py

@@ -26,7 +26,7 @@ def service(componentName, action='start', serviceName='yarn'):
 
   import params
 
-  if (serviceName == 'mapreduce' and componentName == 'historyserver'):
+  if serviceName == 'mapreduce' and componentName == 'historyserver':
     daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
     pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
     usr = params.mapred_user
@@ -38,28 +38,34 @@ def service(componentName, action='start', serviceName='yarn'):
   cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
 
   if action == 'start':
-    daemon_cmd = format("{cmd} start {componentName}")
-    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
+    check_process = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    # Remove the pid file if its corresponding process is not running.
+    File(pid_file,
+         action="delete",
+         not_if=check_process)
+
+    # Attempt to start the process. Internally, this is skipped if the process is already running.
     Execute(daemon_cmd,
             user=usr,
-            not_if=no_op
+            not_if=check_process
     )
 
-    Execute(no_op,
+    # Ensure that the process with the expected PID exists.
+    Execute(check_process,
             user=usr,
-            not_if=no_op,
+            not_if=check_process,
             initial_wait=5
     )
 
   elif action == 'stop':
     daemon_cmd = format("{cmd} stop {componentName}")
     Execute(daemon_cmd,
-            user=usr,
-    )
-    rm_pid = format("rm -f {pid_file}")
-    Execute(rm_pid,
-            user=usr
-    )
+            user=usr)
+
+    File(pid_file,
+         action="delete")
 
   elif action == 'refreshQueues':
     refresh_cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")

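The rewritten start path closes a gap where a stale pid file (left by a crashed daemon) blocked restarts: delete the pid file if no live process matches it, start under a not_if guard, then verify. A plain-subprocess sketch of the same sequence, outside the resource_management framework:

    import os
    import subprocess

    def is_running(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0: existence check only, no signal sent
            return True
        except (IOError, OSError, ValueError):
            return False

    def start(daemon_cmd, pid_file):
        if os.path.exists(pid_file) and not is_running(pid_file):
            os.remove(pid_file)  # stale pid file from a crashed daemon
        if not is_running(pid_file):
            subprocess.check_call(daemon_cmd, shell=True)
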
+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py

@@ -31,5 +31,5 @@ mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
 
 resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
 nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
 mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml

@@ -39,6 +39,7 @@
 
         <component>
           <name>ZOOKEEPER_CLIENT</name>
+          <displayName>ZooKeeper Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
           <commandScript>

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py

@@ -30,7 +30,7 @@ config_dir = "/etc/zookeeper/conf"
 zk_user =  config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
 zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 zk_env_sh_template = config['configurations']['zookeeper-env']['content']
 
 smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
@@ -63,11 +63,10 @@ zookeeper_hosts.sort()
 zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
 zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
 zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-smokeuser = config['configurations']['hadoop-env']['smokeuser']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #log4j.properties

+ 12 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py

@@ -41,27 +41,32 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     for component in componentsList:
       if component["StackServiceComponents"]["cardinality"] is not None:
          componentName = component["StackServiceComponents"]["component_name"]
+         componentDisplayName = component["StackServiceComponents"]["display_name"]
          componentHostsCount = 0
          if component["StackServiceComponents"]["hostnames"] is not None:
            componentHostsCount = len(component["StackServiceComponents"]["hostnames"])
          cardinality = str(component["StackServiceComponents"]["cardinality"])
          # cardinality types: null, 1+, 1-2, 1, ALL
+         message = None
          if "+" in cardinality:
            hostsMin = int(cardinality[:-1])
-           hostsMax = sys.maxint
+           if componentHostsCount < hostsMin:
+             message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
          elif "-" in cardinality:
            nums = cardinality.split("-")
            hostsMin = int(nums[0])
            hostsMax = int(nums[1])
+           if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
+             message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
          elif "ALL" == cardinality:
-           hostsMin = hostsCount
-           hostsMax = hostsCount
+           if componentHostsCount != hostsCount:
+             message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
          else:
-           hostsMin = int(cardinality)
-           hostsMax = int(cardinality)
+           if componentHostsCount != int(cardinality):
+             message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
 
-         if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
-           items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Cardinality violation, cardinality={0}, hosts count={1}'.format(cardinality, str(componentHostsCount)), "component-name": str(componentName) } )
+         if message is not None:
+           items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
 
 
     # Validating host-usage
     # Validating host-usage
     usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isNotValuable(component)]
     usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isNotValuable(component)]
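The rewritten validator above maps each cardinality form ("1+", "1-2", exact count, "ALL") to a targeted message instead of the old generic "Cardinality violation" text. A standalone sketch of the same parsing logic, with hypothetical names and inputs (not part of this commit):

# Sketch only: mirrors the cardinality check in HDP206StackAdvisor above.
def check_cardinality(cardinality, component_hosts, total_hosts, display_name):
    """Return an error message, or None when the host count satisfies the cardinality."""
    if "+" in cardinality:  # "1+" means at least N hosts
        hosts_min = int(cardinality[:-1])
        if component_hosts < hosts_min:
            return "At least {0} {1} components should be installed in cluster.".format(hosts_min, display_name)
    elif "-" in cardinality:  # "1-2" means a closed range
        hosts_min, hosts_max = [int(n) for n in cardinality.split("-")]
        if component_hosts < hosts_min or component_hosts > hosts_max:
            return "Between {0} and {1} {2} components should be installed in cluster.".format(hosts_min, hosts_max, display_name)
    elif cardinality == "ALL":  # one instance on every host
        if component_hosts != total_hosts:
            return "{0} component should be installed on all hosts in cluster.".format(display_name)
    else:  # a plain integer means an exact count
        if component_hosts != int(cardinality):
            return "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), display_name)
    return None

# e.g. check_cardinality("1-2", 3, 5, "ZooKeeper Server") -> "Between 1 and 2 ..."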

+ 2 - 5
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/multinode-default.json

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
             }
         }
@@ -139,9 +139,6 @@
                 {
                     "name" : "NAGIOS_SERVER"
                 },
-                {
-                    "name" : "GANGLIA_SERVER"
-                },
                 {
                     "name" : "ZOOKEEPER_CLIENT"
                 },
@@ -184,4 +181,4 @@
         "stack_name" : "HDP",
         "stack_name" : "HDP",
         "stack_version" : "2.1"
         "stack_version" : "2.1"
     }
     }
-}
+}

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/blueprints/singlenode-default.json

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
             }
         }
@@ -133,4 +133,4 @@
         "stack_name" : "HDP",
         "stack_name" : "HDP",
         "stack_version" : "2.1"
         "stack_version" : "2.1"
     }
     }
-}
+}

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py

@@ -25,9 +25,9 @@ config = Script.get_config()

 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']

-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']

 java_home = config['hostLevelParams']['java_home']
@@ -45,14 +45,13 @@ falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
 falcon_port = config['configurations']['falcon-env']['falcon_port']
 falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
 falcon_startup_properties = config['configurations']['falcon-startup.properties']
-smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 falcon_env_sh_template = config['configurations']['falcon-env']['content']

 falcon_webapp_dir = '/var/lib/falcon/webapp'
 flacon_apps_dir = '/apps/falcon'
 #for create_hdfs_directory
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
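This hunk repeats the migration that runs through most of the params.py files in this commit: security_enabled is no longer inferred from core-site's hadoop.security.authentication but read straight from cluster-env. A minimal before/after sketch with a stand-in config dict (values here are hypothetical; is_empty() from resource_management is approximated with a truthiness check):

# Stand-in for the script's config dictionary; values are made up.
config = {'configurations': {
    'core-site':   {'hadoop.security.authentication': 'kerberos'},
    'cluster-env': {'security_enabled': True},
}}

# Before: infer the flag from the Hadoop authentication mode.
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled_old = bool(_authentication) and _authentication == 'kerberos'

# After: read the dedicated cluster-env flag directly.
security_enabled_new = config['configurations']['cluster-env']['security_enabled']

assert security_enabled_old == security_enabled_new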

+ 3 - 4
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/scripts/params.py

@@ -29,7 +29,7 @@ log_dir = config['configurations']['storm-env']['storm_log_dir']
 pid_dir = status_params.pid_dir
 conf_dir = "/etc/storm/conf"
 local_dir = config['configurations']['storm-site']['storm.local.dir']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 java64_home = config['hostLevelParams']['java_home']
 nimbus_host = config['configurations']['storm-site']['nimbus.host']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
@@ -48,12 +48,11 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \
 else:
   ganglia_installed = False

-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']

 if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
-  _kerberos_domain = config['configurations']['hadoop-env']['kerberos_domain']
+  _kerberos_domain = config['configurations']['cluster-env']['kerberos_domain']
   _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
   storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
   storm_keytab_path = config['configurations']['storm-env']['storm_keytab']
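The Kerberos branch above also shows the standard _HOST convention: storm_principal_name is a template resolved per host by substituting the lowercased hostname. A tiny sketch (the principal and hostname below are hypothetical):

# Resolve a Kerberos principal template for one host, as the Storm params do.
principal_template = 'storm/_HOST@EXAMPLE.COM'  # hypothetical storm_principal_name
hostname = 'Worker01.Example.com'

# Hostnames are lowercased first because Kerberos principals are case-sensitive.
storm_jaas_principal = principal_template.replace('_HOST', hostname.lower())
print(storm_jaas_principal)  # storm/worker01.example.com@EXAMPLE.COM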

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/package/scripts/params.py

@@ -29,4 +29,4 @@ hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']

 tez_user = config['configurations']['tez-env']['tez_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py

@@ -32,10 +32,9 @@ mapred_user = status_params.mapred_user
 yarn_user = status_params.yarn_user
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

-smokeuser = config['configurations']['hadoop-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
 kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 rm_hosts = config['clusterHostInfo']['rm_host']
@@ -91,7 +90,7 @@ yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduc
 mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
 yarn_bin = "/usr/lib/hadoop-yarn/sbin"

-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 limits_conf_dir = "/etc/security/limits.d"
 hadoop_conf_dir = "/etc/hadoop/conf"
 yarn_container_bin = "/usr/lib/hadoop-yarn/bin"

+ 2 - 5
ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/multinode-default.json

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
             }
         }
@@ -134,9 +134,6 @@
                 {
                     "name" : "NAGIOS_SERVER"
                 },
-                {
-                    "name" : "GANGLIA_SERVER"
-                },
                 {
                     "name" : "ZOOKEEPER_CLIENT"
                 },
@@ -179,4 +176,4 @@
         "stack_name" : "HDP",
         "stack_name" : "HDP",
         "stack_version" : "2.1"
         "stack_version" : "2.1"
     }
     }
-}
+}

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.1/blueprints/singlenode-default.json

@@ -1,7 +1,7 @@
 {
     "configurations" : [
         {
-            "global" : {
+            "nagios-env" : {
                 "nagios_contact" : "admin@localhost"
             }
         }
@@ -130,4 +130,4 @@
         "stack_name" : "HDP",
         "stack_name" : "HDP",
         "stack_version" : "2.1"
         "stack_version" : "2.1"
     }
     }
-}
+}

+ 4 - 5
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py

@@ -25,9 +25,9 @@ config = Script.get_config()

 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
-smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']

-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']

 java_home = config['hostLevelParams']['java_home']
@@ -45,14 +45,13 @@ falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
 falcon_port = config['configurations']['falcon-env']['falcon_port']
 falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
 falcon_startup_properties = config['configurations']['falcon-startup.properties']
-smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 falcon_env_sh_template = config['configurations']['falcon-env']['content']

 falcon_webapp_dir = '/var/lib/falcon/webapp'
 flacon_apps_dir = '/apps/falcon'
 #for create_hdfs_directory
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']

+ 3 - 4
ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py

@@ -29,7 +29,7 @@ log_dir = config['configurations']['storm-env']['storm_log_dir']
 pid_dir = status_params.pid_dir
 conf_dir = "/etc/storm/conf"
 local_dir = config['configurations']['storm-site']['storm.local.dir']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 java64_home = config['hostLevelParams']['java_home']
 nimbus_host = config['configurations']['storm-site']['nimbus.host']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
@@ -49,12 +49,11 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \
 else:
   ganglia_installed = False

-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+security_enabled = config['configurations']['cluster-env']['security_enabled']

 if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
-  _kerberos_domain = config['configurations']['hadoop-env']['kerberos_domain']
+  _kerberos_domain = config['configurations']['cluster-env']['kerberos_domain']
   _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
   storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
   storm_keytab_path = config['configurations']['storm-env']['storm_keytab']

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/configuration/tez-site.xml

@@ -146,7 +146,7 @@

   <property>
     <name>tez.am.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
     <description>
         Additional execution environment entries for tez. This is not an additive property. You must preserve the original value if
         you want to have access to native libraries.
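The removed value embedded a backtick command substitution: it ran $JAVA_HOME/bin/java -d32 -version and chose Linux-i386-32 when a 32-bit data model was available, falling back to Linux-amd64-64; the new value hardcodes the 64-bit directory. A rough Python equivalent of the removed probe, for reference only (paths are illustrative):

import os
import subprocess

def native_lib_dir(java_home):
    # Mirror the removed shell probe: 32-bit-capable JVM -> Linux-i386-32, else Linux-amd64-64.
    java = os.path.join(java_home, 'bin', 'java')
    with open(os.devnull, 'w') as devnull:
        try:
            # `java -d32 -version` exits 0 only when a 32-bit data model is supported.
            rc = subprocess.call([java, '-d32', '-version'], stdout=devnull, stderr=devnull)
        except OSError:
            rc = 1  # no java binary found; fall back to the 64-bit default
    return 'Linux-i386-32' if rc == 0 else 'Linux-amd64-64'

# e.g. 'LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/' + native_lib_dir('/usr/jdk64')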

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py

@@ -29,5 +29,5 @@ hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']

 tez_user = config['configurations']['tez-env']['tez_user']
-user_group = config['configurations']['hadoop-env']['user_group']
+user_group = config['configurations']['cluster-env']['user_group']
 tez_env_sh_template = config['configurations']['tez-env']['content']

+ 8 - 0
ambari-server/src/main/resources/stacks/HDP/2.2.1/services/YARN/metainfo.xml

@@ -22,6 +22,14 @@
     <service>
       <name>YARN</name>
       <version>2.6.0.2.2</version>
+
+      <components>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <cardinality>1</cardinality>
+        </component>
+      </components>
+
     </service>
   </services>
   <service>

+ 27 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java

@@ -268,6 +268,33 @@ public class StackExtensionHelperTest {
     }
   }

+  @Test
+  public void testClientConfigFilesInheritance() throws Exception{
+    File stackRoot = new File(stackRootStr);
+    StackInfo stackInfo = new StackInfo();
+    stackInfo.setName("HDP");
+    stackInfo.setVersion("2.0.6");
+    StackExtensionHelper helper = new StackExtensionHelper(injector, stackRoot);
+    helper.populateServicesForStack(stackInfo);
+    helper.fillInfo();
+    List<ServiceInfo> allServices = helper.getAllApplicableServices(stackInfo);
+    for (ServiceInfo serviceInfo : allServices) {
+      if (serviceInfo.getName().equals("ZOOKEEPER")) {
+        List<ComponentInfo> components = serviceInfo.getComponents();
+        assertTrue(components.size() == 2);
+        ComponentInfo componentInfo = components.get(1);
+        List<ClientConfigFileDefinition> clientConfigs = componentInfo.getClientConfigFiles();
+        assertEquals(2,clientConfigs.size());
+        assertEquals("zookeeper-env",clientConfigs.get(0).getDictionaryName());
+        assertEquals("zookeeper-env.sh",clientConfigs.get(0).getFileName());
+        assertEquals("env",clientConfigs.get(0).getType());
+        assertEquals("zookeeper-log4j",clientConfigs.get(1).getDictionaryName());
+        assertEquals("log4j.properties",clientConfigs.get(1).getFileName());
+        assertEquals("env",clientConfigs.get(1).getType());
+      }
+    }
+  }
+
   @Test
   public void testMonitoringServicePropertyInheritance() throws Exception{
     File stackRoot = new File(stackRootStr);

+ 15 - 4
ambari-server/src/test/java/org/apache/ambari/server/configuration/ConfigurationTest.java

@@ -268,15 +268,26 @@ public class ConfigurationTest {

     Assert.assertEquals(25, conf.getClientThreadPoolSize());
     Assert.assertEquals(25, conf.getAgentThreadPoolSize());
-    
+
+    Assert.assertEquals(10, conf.getViewExtractionThreadPoolCoreSize());
+    Assert.assertEquals(20, conf.getViewExtractionThreadPoolMaxSize());
+    Assert.assertEquals(100000L, conf.getViewExtractionThreadPoolTimeout());
+
     ambariProperties = new Properties();
     ambariProperties.setProperty("client.threadpool.size.max", "4");
     ambariProperties.setProperty("agent.threadpool.size.max", "82");
+
+    ambariProperties.setProperty("view.extraction.threadpool.size.core", "83");
+    ambariProperties.setProperty("view.extraction.threadpool.size.max", "56");
+    ambariProperties.setProperty("view.extraction.threadpool.timeout", "6000");
+
     conf = new Configuration(ambariProperties);

     Assert.assertEquals(4, conf.getClientThreadPoolSize());
     Assert.assertEquals(82, conf.getAgentThreadPoolSize());
-    
-  }  
-  
+
+    Assert.assertEquals(83, conf.getViewExtractionThreadPoolCoreSize());
+    Assert.assertEquals(56, conf.getViewExtractionThreadPoolMaxSize());
+    Assert.assertEquals(6000L, conf.getViewExtractionThreadPoolTimeout());
+  }
 }
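The updated test pins down the lookup pattern for the new view-extraction settings: each view.extraction.threadpool.* key has a compiled-in default (core size 10, max size 20, timeout 100000 ms) that an ambari.properties entry overrides. Schematically, under the assumption that properties arrive as strings (key names are from the test; the helper below is hypothetical):

# Defaults asserted by the test when no property is set.
VIEW_EXTRACTION_DEFAULTS = {
    'view.extraction.threadpool.size.core': 10,
    'view.extraction.threadpool.size.max': 20,
    'view.extraction.threadpool.timeout': 100000,
}

def get_int_property(ambari_properties, key):
    """Read a property, falling back to its compiled-in default."""
    return int(ambari_properties.get(key, VIEW_EXTRACTION_DEFAULTS[key]))

props = {'view.extraction.threadpool.size.core': '83'}
assert get_int_property(props, 'view.extraction.threadpool.size.core') == 83
assert get_int_property(props, 'view.extraction.threadpool.size.max') == 20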

+ 86 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java

@@ -26,6 +26,7 @@ import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
@@ -65,6 +66,7 @@ import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.ServiceInfo;
+import javax.persistence.RollbackException;
 import org.easymock.Capture;
 import org.junit.Test;

@@ -281,6 +283,90 @@ public class AmbariManagementControllerImplTest {
     verify(injector, clusters, cluster, cluster2, response, response2);
   }

+  /**
+   * Ensure that when the cluster id is provided and the given cluster name is different from the cluster's name
+   * then the cluster rename logic is executed.
+   */
+  @Test
+  public void testUpdateClusters() throws Exception {
+    // member state mocks
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    Injector injector = createStrictMock(Injector.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    ActionManager actionManager = createNiceMock(ActionManager.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+
+    // requests
+    Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
+
+    // expectations
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null);
+    expect(clusterRequest.getClusterName()).andReturn("clusterNew").times(4);
+    expect(clusterRequest.getClusterId()).andReturn(1L).times(4);
+    expect(clusters.getClusterById(1L)).andReturn(cluster);
+    expect(cluster.getClusterName()).andReturn("clusterOld").times(2);
+    cluster.setClusterName("clusterNew");
+    expectLastCall();
+
+    // replay mocks
+    replay(actionManager, cluster, clusters, injector, clusterRequest);
+
+    // test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
+    controller.updateClusters(setRequests, null);
+
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    verify(actionManager, cluster, clusters, injector, clusterRequest);
+  }
+
+  /**
+   * Ensure that RollbackException is thrown outside the updateClusters method
+   * when a unique constraint violation occurs.
+   */
+  @Test
+  public void testUpdateClusters__RollbackException() throws Exception {
+    // member state mocks
+    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
+    Injector injector = createStrictMock(Injector.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    ActionManager actionManager = createNiceMock(ActionManager.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+
+    // requests
+    Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
+
+    // expectations
+    injector.injectMembers(capture(controllerCapture));
+    expect(injector.getInstance(Gson.class)).andReturn(null);
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null);
+    expect(clusterRequest.getClusterName()).andReturn("clusterNew").times(4);
+    expect(clusterRequest.getClusterId()).andReturn(1L).times(4);
+    expect(clusters.getClusterById(1L)).andReturn(cluster);
+    expect(cluster.getClusterName()).andReturn("clusterOld").times(2);
+    cluster.setClusterName("clusterNew");
+    expectLastCall().andThrow(new RollbackException());
+
+    // replay mocks
+    replay(actionManager, cluster, clusters, injector, clusterRequest);
+
+    // test
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
+    try {
+      controller.updateClusters(setRequests, null);
+      fail("Expected RollbackException");
+    } catch (RollbackException e) {
+      //expected
+    }
+    // assert and verify
+    assertSame(controller, controllerCapture.getValue());
+    verify(actionManager, cluster, clusters, injector, clusterRequest);
+  }
+
   @Test
   public void testGetHostComponents() throws Exception {
     // member state mocks

+ 4 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProviderTest.java

@@ -62,6 +62,7 @@ import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.security.SecurityHelper;
+import org.apache.ambari.server.view.ViewInstanceHandlerList;
 import org.apache.ambari.server.view.ViewRegistry;
 import org.apache.ambari.server.view.ViewRegistryTest;
 import org.easymock.EasyMock;
@@ -86,6 +87,7 @@ public class AmbariPrivilegeResourceProviderTest {
   private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
   private static final ResourceTypeDAO resourceTypeDAO = createNiceMock(ResourceTypeDAO.class);
   private static final SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
+  private static final ViewInstanceHandlerList handlerList = createNiceMock(ViewInstanceHandlerList.class);

   @BeforeClass
   public static void initClass() {
@@ -96,8 +98,8 @@ public class AmbariPrivilegeResourceProviderTest {
   @Before
   public void resetGlobalMocks() {
     ViewRegistry.initInstance(ViewRegistryTest.getRegistry(viewDAO, viewInstanceDAO, userDAO,
-        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper));
-    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO, clusterDAO);
+        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper, handlerList));
+    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO, clusterDAO, handlerList);
   }

   @Test

+ 170 - 5
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java

@@ -18,24 +18,32 @@

 package org.apache.ambari.server.controller.internal;

-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.TaskStatusResponse;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.*;
 import org.apache.ambari.server.controller.spi.*;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.junit.Assert;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;

+import java.io.File;
+import java.io.PrintWriter;
 import java.util.*;

 import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;

 /**
  * TaskResourceProvider tests.
  */
+@RunWith(PowerMockRunner.class)
+@PrepareForTest( {ClientConfigResourceProvider.class} )
 public class ClientConfigResourceProviderTest {
   @Test
   public void testCreateResources() throws Exception {
@@ -115,6 +123,163 @@ public class ClientConfigResourceProviderTest {
     verify(managementController, response);
   }

+  @Test
+  public void testGetResources() throws Exception {
+    Resource.Type type = Resource.Type.ClientConfig;
+
+    AmbariManagementController managementController = createNiceMock(AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+
+    Cluster cluster = createNiceMock(Cluster.class);
+    AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
+    StackId stackId = createNiceMock(StackId.class);
+    ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
+    ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
+    CommandScriptDefinition commandScriptDefinition = createNiceMock(CommandScriptDefinition.class);
+    Config clusterConfig = createNiceMock(Config.class);
+    Host host = createNiceMock(Host.class);
+    Service service = createNiceMock(Service.class);
+    ServiceComponent serviceComponent = createNiceMock(ServiceComponent.class);
+    ServiceComponentHost serviceComponentHost = createNiceMock(ServiceComponentHost.class);
+    ServiceOsSpecific serviceOsSpecific = createNiceMock(ServiceOsSpecific.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+
+    File mockFile = PowerMock.createNiceMock(File.class);
+    Runtime runtime = createMock(Runtime.class);
+    Process process = createNiceMock(Process.class);
+
+    Collection<Config> clusterConfigs = new HashSet<Config>();
+    //Config clusterConfig = new ConfigImpl("config");
+    clusterConfigs.add(clusterConfig);
+    Map<String, Map<String, String>> allConfigTags = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> configTags = new HashMap<String,
+            Map<String, String>>();
+    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<String,
+            Map<String, Map<String, String>>>();
+
+    ClientConfigFileDefinition clientConfigFileDefinition = new ClientConfigFileDefinition();
+    clientConfigFileDefinition.setDictionaryName("pig-env");
+    clientConfigFileDefinition.setFileName("pig-env.sh");
+    clientConfigFileDefinition.setType("env");
+    List <ClientConfigFileDefinition> clientConfigFileDefinitionList = new LinkedList<ClientConfigFileDefinition>();
+    clientConfigFileDefinitionList.add(clientConfigFileDefinition);
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController);
+
+    // create the request
+    Request request = PropertyHelper.getReadRequest(ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID, "c1",
+            ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID,
+            ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID);
+
+    Predicate predicate = new PredicateBuilder().property(ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("c1").
+        toPredicate();
+
+    String clusterName = "C1";
+    String serviceName = "PIG";
+    String componentName = "PIG";
+    String hostName = "Host100";
+    String desiredState = "INSTALLED";
+
+    String stackName = "S1";
+    String stackVersion = "V1";
+
+    String stackRoot="/tmp/stacks/S1/V1";
+
+    String packageFolder="PIG/package";
+
+    HashMap<String, Host> hosts = new HashMap<String, Host>();
+    hosts.put(hostName,host);
+    HashMap<String, Service> services = new HashMap<String, Service>();
+    services.put(serviceName,service);
+    HashMap<String, ServiceComponent> serviceComponentMap = new HashMap<String, ServiceComponent>();
+    serviceComponentMap.put(componentName,serviceComponent);
+    HashMap<String, ServiceComponentHost> serviceComponentHosts = new HashMap<String, ServiceComponentHost>();
+    serviceComponentHosts.put(componentName, serviceComponentHost);
+    HashMap<String, ServiceOsSpecific> serviceOsSpecificHashMap = new HashMap<String, ServiceOsSpecific>();
+    serviceOsSpecificHashMap.put("key",serviceOsSpecific);
+
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(clusterName, serviceName, componentName, hostName, desiredState, "", null, null, null);
+
+    Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
+    responses.add(shr1);
+
+    // set expectations
+    expect(managementController.getConfigHelper()).andReturn(configHelper);
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getCluster(clusterName)).andReturn(cluster).anyTimes();
+    expect(configHelper.getEffectiveConfigProperties(cluster, configTags)).andReturn(properties);
+    expect(clusterConfig.getType()).andReturn(Configuration.HIVE_CONFIG_TAG).anyTimes();
+    expect(configHelper.getEffectiveConfigAttributes(cluster, configTags)).andReturn(attributes);
+    //!!!!
+    Map<String,String> props = new HashMap<String, String>();
+    props.put(Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "pass");
+    props.put("key","value");
+    expect(clusterConfig.getProperties()).andReturn(props);
+    expect(configHelper.getEffectiveDesiredTags(cluster, hostName)).andReturn(allConfigTags);
+    //!!!!
+    expect(cluster.getClusterName()).andReturn(clusterName);
+    expect(managementController.getHostComponents((Set<ServiceComponentHostRequest>) anyObject())).andReturn(responses).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+
+    expect(stackId.getStackName()).andReturn(stackName).anyTimes();
+    expect(stackId.getStackVersion()).andReturn(stackVersion).anyTimes();
+
+    expect(ambariMetaInfo.getComponent(stackName, stackVersion, serviceName, componentName)).andReturn(componentInfo);
+    expect(ambariMetaInfo.getServiceInfo(stackName, stackVersion, serviceName)).andReturn(serviceInfo);
+    expect(serviceInfo.getServicePackageFolder()).andReturn(packageFolder);
+    expect(ambariMetaInfo.getComponentCategory((String) anyObject(), (String) anyObject(),
+            (String) anyObject(), (String) anyObject())).andReturn(componentInfo).anyTimes();
+    expect(componentInfo.getCommandScript()).andReturn(commandScriptDefinition);
+    expect(componentInfo.getClientConfigFiles()).andReturn(clientConfigFileDefinitionList);
+    expect(ambariMetaInfo.getStackRoot()).andReturn(new File(stackRoot));
+    expect(cluster.getAllConfigs()).andReturn(clusterConfigs);
+    expect(clusters.getHostsForCluster(clusterName)).andReturn(hosts);
+    expect(cluster.getServices()).andReturn(services);
+    expect(service.getServiceComponents()).andReturn(serviceComponentMap);
+    expect(serviceComponent.getName()).andReturn(componentName);
+    expect(serviceComponent.getServiceComponentHosts()).andReturn(serviceComponentHosts);
+    expect(clusters.getHost(hostName)).andReturn(host);
+
+    HashMap<String, String> rcaParams = new HashMap<String, String>();
+    rcaParams.put("key","value");
+    expect(managementController.getRcaParameters()).andReturn(rcaParams).anyTimes();
+    expect(ambariMetaInfo.getServiceInfo(stackName, stackVersion, serviceName)).andReturn(serviceInfo);
+    expect(serviceInfo.getOsSpecifics()).andReturn(new HashMap<String, ServiceOsSpecific>()).anyTimes();
+    Set<String> userSet = new HashSet<String>();
+    userSet.add("hdfs");
+    expect(configHelper.getPropertyValuesWithPropertyType(stackId, PropertyInfo.PropertyType.USER, cluster)).andReturn(userSet);
+    PowerMock.expectNew(File.class, new Class<?>[]{String.class}, anyObject(String.class)).andReturn(mockFile).anyTimes();
+    PowerMock.createNiceMockAndExpectNew(PrintWriter.class, anyObject());
+    expect(mockFile.getParent()).andReturn("");
+    PowerMock.mockStatic(Runtime.class);
+    expect(Runtime.getRuntime()).andReturn(runtime);
+    expect(mockFile.exists()).andReturn(true);
+    expect(runtime.exec("ambari-python-wrap /tmp/stacks/S1/V1/PIG/package/null generate_configs null " +
+            "/tmp/stacks/S1/V1/PIG/package /tmp/ambari-server/structured-out.json INFO /tmp/ambari-server"))
+            .andReturn(process).once();
+
+    // replay
+    replay(managementController, clusters, cluster, ambariMetaInfo, stackId, componentInfo, commandScriptDefinition,
+            clusterConfig, host, service, serviceComponent, serviceComponentHost, serviceInfo, configHelper,
+            runtime, process);
+    PowerMock.replayAll();
+
+    provider.getResources(request, predicate);
+
+
+
+    // verify
+    verify(managementController, clusters, cluster, ambariMetaInfo, stackId, componentInfo,commandScriptDefinition,
+            clusterConfig, host, service, serviceComponent, serviceComponentHost, serviceInfo, configHelper,
+            runtime, process);
+  }
+
   @Test
   public void testDeleteResources() throws Exception {
     Resource.Type type = Resource.Type.ClientConfig;

+ 4 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProviderTest.java

@@ -42,6 +42,7 @@ import org.apache.ambari.server.orm.entities.ViewEntityTest;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntityTest;
 import org.apache.ambari.server.security.SecurityHelper;
+import org.apache.ambari.server.view.ViewInstanceHandlerList;
 import org.apache.ambari.server.view.ViewRegistry;
 import org.apache.ambari.server.view.ViewRegistryTest;
 import org.junit.Assert;
@@ -77,6 +78,7 @@ public class ViewPrivilegeResourceProviderTest {
   private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
   private static final ResourceTypeDAO resourceTypeDAO = createNiceMock(ResourceTypeDAO.class);
   private static final SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
+  private static final ViewInstanceHandlerList handlerList = createNiceMock(ViewInstanceHandlerList.class);

   @BeforeClass
   public static void initClass() {
@@ -87,8 +89,8 @@ public class ViewPrivilegeResourceProviderTest {
   public void resetGlobalMocks() {

     ViewRegistry.initInstance(ViewRegistryTest.getRegistry(viewDAO, viewInstanceDAO, userDAO,
-        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper));
-    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO);
+        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper, handlerList));
+    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO, handlerList);
   }

   @Test

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java

@@ -235,6 +235,7 @@ public class NagiosPropertyProviderTest {

   @Test
   public void testNagiosServiceAlerts() throws Exception {
+    module.properties.remove(Configuration.NAGIOS_IGNORE_FOR_SERVICES_KEY); // make sure NAGIOS_IGNORE_FOR_SERVICES_KEY is not set, which could be set by testNagiosServiceAlertsAddIgnore

     TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");

@@ -243,6 +244,7 @@ public class NagiosPropertyProviderTest {
         "ServiceInfo/cluster_name",
         "ServiceInfo/cluster_name",
         "ServiceInfo/service_name");
         "ServiceInfo/service_name");
     npp.forceReset();
     npp.forceReset();
+    NagiosPropertyProvider.init(injector);
     
     
     Resource resource = new ResourceImpl(Resource.Type.Service);
     Resource resource = new ResourceImpl(Resource.Type.Service);
     resource.setProperty("ServiceInfo/cluster_name", "c1");
     resource.setProperty("ServiceInfo/cluster_name", "c1");

+ 29 - 1
ambari-server/src/test/java/org/apache/ambari/server/orm/entities/ViewEntityTest.java

@@ -25,6 +25,7 @@ import org.apache.ambari.server.view.configuration.ResourceConfig;
 import org.apache.ambari.server.view.configuration.ResourceConfigTest;
 import org.apache.ambari.server.view.configuration.ViewConfig;
 import org.apache.ambari.server.view.configuration.ViewConfigTest;
+import org.apache.ambari.view.ViewDefinition;
 import org.junit.Assert;
 import org.junit.Test;

@@ -55,7 +56,9 @@ public class ViewEntityTest {
     properties.put("p3", "v3");
     properties.put("p3", "v3");
 
 
     Configuration ambariConfig = new Configuration(properties);
     Configuration ambariConfig = new Configuration(properties);
-    ViewEntity viewEntity = new ViewEntity(viewConfig, ambariConfig, ViewEntityTest.class.getClassLoader(), "view.jar");
+    ViewEntity viewEntity = new ViewEntity(viewConfig, ambariConfig, "view.jar");
+
+    viewEntity.setClassLoader(ViewEntityTest.class.getClassLoader());
 
 
     ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
     ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity();
     resourceTypeEntity.setId(10);
     resourceTypeEntity.setId(10);
@@ -238,4 +241,29 @@ public class ViewEntityTest {
     Assert.assertEquals("v2", configuration.getProperty("p2"));
     Assert.assertEquals("v2", configuration.getProperty("p2"));
     Assert.assertEquals("v3", configuration.getProperty("p3"));
     Assert.assertEquals("v3", configuration.getProperty("p3"));
   }
   }
+
+  @Test
+  public void testGetSetStatus() throws Exception {
+    ViewEntity viewDefinition = getViewEntity();
+
+    viewDefinition.setStatus(ViewDefinition.ViewStatus.PENDING);
+    Assert.assertEquals(ViewDefinition.ViewStatus.PENDING, viewDefinition.getStatus());
+
+    viewDefinition.setStatus(ViewDefinition.ViewStatus.LOADING);
+    Assert.assertEquals(ViewDefinition.ViewStatus.LOADING, viewDefinition.getStatus());
+
+    viewDefinition.setStatus(ViewDefinition.ViewStatus.LOADED);
+    Assert.assertEquals(ViewDefinition.ViewStatus.LOADED, viewDefinition.getStatus());
+
+    viewDefinition.setStatus(ViewDefinition.ViewStatus.ERROR);
+    Assert.assertEquals(ViewDefinition.ViewStatus.ERROR, viewDefinition.getStatus());
+  }
+
+  @Test
+  public void testGetSetStatusDetail() throws Exception {
+    ViewEntity viewDefinition = getViewEntity();
+
+    viewDefinition.setStatusDetail("status detail");
+    Assert.assertEquals("status detail", viewDefinition.getStatusDetail());
+  }
 }
 }
+ 97 - 0
ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java

@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.security.authorization;
+
+import org.apache.ambari.server.orm.dao.MemberDAO;
+import org.apache.ambari.server.orm.dao.PrivilegeDAO;
+import org.apache.ambari.server.orm.dao.UserDAO;
+import org.apache.ambari.server.orm.entities.UserEntity;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.springframework.security.authentication.DisabledException;
+import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
+import org.springframework.security.authentication.dao.DaoAuthenticationProvider;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.crypto.password.PasswordEncoder;
+import org.springframework.security.crypto.password.StandardPasswordEncoder;
+
+public class AmbariAuthorizationProviderDisableUserTest {
+
+  private UserDAO userDAO;
+  
+  private PasswordEncoder encoder = new StandardPasswordEncoder();
+  
+  private DaoAuthenticationProvider daoProvider;
+
+  private AmbariLdapAuthoritiesPopulator ldapPopulator;
+
+  @Before
+  public void setUp() {
+    userDAO = Mockito.mock(UserDAO.class);
+    
+    createUser("activeUser", true);
+    createUser("disabledUser", false);
+    
+    MemberDAO memberDao = Mockito.mock(MemberDAO.class);
+    PrivilegeDAO privilegeDao = Mockito.mock(PrivilegeDAO.class);
+    AuthorizationHelper authorizationHelper = new AuthorizationHelper();
+    
+    AmbariLocalUserDetailsService uds = new AmbariLocalUserDetailsService(null,null,authorizationHelper,userDAO,memberDao,privilegeDao);
+    daoProvider = new DaoAuthenticationProvider();
+    daoProvider.setUserDetailsService(uds);
+    daoProvider.setPasswordEncoder(encoder);
+    
+    ldapPopulator = new AmbariLdapAuthoritiesPopulator(authorizationHelper, userDAO, memberDao, privilegeDao);
+    
+  }
+  
+  @Test public void testDisabledUserViaDaoProvider(){
+    try{
+      daoProvider.authenticate(new UsernamePasswordAuthenticationToken("disabledUser","pwd"));
+      Assert.fail("Disabled user passes authentication");
+    }catch(DisabledException e){
+      //expected
+      Assert.assertEquals("User is disabled", e.getMessage());//UI depends on this
+    }
+    Authentication auth = daoProvider.authenticate(new UsernamePasswordAuthenticationToken("activeUser","pwd"));
+    Assert.assertNotNull(auth);
+    Assert.assertTrue(auth.isAuthenticated());
+  }
+
+  @Test public void testDisabledUserViaLdapProvider(){
+    try{
+      ldapPopulator.getGrantedAuthorities(null, "disabledUser");
+      Assert.fail("Disabled user passes authentication");
+    }catch(DisabledException e){
+      //expected
+      Assert.assertEquals("User is disabled", e.getMessage());//UI depends on this
+    }
+  }
+  
+  private void createUser(String login, boolean isActive) {
+    UserEntity activeUser = new UserEntity();
+    activeUser.setActive(isActive);
+    activeUser.setUserName(login);
+    activeUser.setUserPassword(encoder.encode("pwd"));
+    Mockito.when(userDAO.findLocalUserByName(login)).thenReturn(activeUser);
+    Mockito.when(userDAO.findLdapUserByName(login)).thenReturn(activeUser);
+  }
+}
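Both providers are expected to reject an inactive account with a DisabledException whose message is exactly "User is disabled", since the UI matches on that string. Reduced to a sketch (names are hypothetical; the real checks live in AmbariLocalUserDetailsService and AmbariLdapAuthoritiesPopulator):

class DisabledException(Exception):
    pass

def check_user_active(user):
    """Refuse authentication for accounts flagged inactive."""
    if not user.get('active', False):
        # The UI keys off this exact message, per the test's comments.
        raise DisabledException('User is disabled')

check_user_active({'userName': 'activeUser', 'active': True})      # passes silently
# check_user_active({'userName': 'disabledUser', 'active': False})  # raises DisabledException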

+ 2 - 0
ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestAmbariLdapAuthoritiesPopulator.java

@@ -63,6 +63,7 @@ public class TestAmbariLdapAuthoritiesPopulator extends EasyMockSupport {
         .withConstructor(helper, userDAO, memberDAO, privilegeDAO).createMock();

     expect(userEntity.getPrincipal()).andReturn(principalEntity);
+    expect(userEntity.getActive()).andReturn(true);
     expect(memberDAO.findAllMembersByUser(userEntity)).andReturn(Collections.singletonList(memberEntity));
     expect(memberEntity.getGroup()).andReturn(groupEntity);
     expect(groupEntity.getPrincipal()).andReturn(groupPrincipalEntity);
@@ -87,6 +88,7 @@ public class TestAmbariLdapAuthoritiesPopulator extends EasyMockSupport {
         .withConstructor(helper, userDAO, memberDAO, privilegeDAO).createMock();

     expect(userEntity.getPrincipal()).andReturn(principalEntity).anyTimes();
+    expect(userEntity.getActive()).andReturn(true);
     expect(memberDAO.findAllMembersByUser(userEntity)).andReturn(Collections.singletonList(memberEntity)).anyTimes();
     expect(memberEntity.getGroup()).andReturn(groupEntity).anyTimes();
     expect(groupEntity.getPrincipal()).andReturn(groupPrincipalEntity).anyTimes();

+ 42 - 10
ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java

@@ -76,10 +76,12 @@ import org.apache.ambari.server.view.configuration.ViewConfig;
 import org.apache.ambari.server.view.configuration.ViewConfigTest;
 import org.apache.ambari.server.view.events.EventImpl;
 import org.apache.ambari.server.view.events.EventImplTest;
+import org.apache.ambari.view.ViewDefinition;
 import org.apache.ambari.view.events.Event;
 import org.apache.ambari.view.events.Listener;
 import org.easymock.EasyMock;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.springframework.security.core.GrantedAuthority;

@@ -164,7 +166,6 @@ public class ViewRegistryTest {
   public void testReadViewArchives() throws Exception {
     ViewRegistry registry = getRegistry();

-    Configuration configuration = createNiceMock(Configuration.class);
     File viewDir = createNiceMock(File.class);
     File extractedArchiveDir = createNiceMock(File.class);
     File viewArchive = createNiceMock(File.class);
@@ -219,6 +220,10 @@ public class ViewRegistryTest {
     expect(configuration.getViewsDir()).andReturn(viewDir);
     expect(viewDir.getAbsolutePath()).andReturn("/var/lib/ambari-server/resources/views");

+    expect(configuration.getViewExtractionThreadPoolCoreSize()).andReturn(2).anyTimes();
+    expect(configuration.getViewExtractionThreadPoolMaxSize()).andReturn(3).anyTimes();
+    expect(configuration.getViewExtractionThreadPoolTimeout()).andReturn(10000L).anyTimes();
+
     expect(viewDir.listFiles()).andReturn(new File[]{viewArchive});

     expect(viewArchive.isDirectory()).andReturn(false);
@@ -270,20 +275,32 @@ public class ViewRegistryTest {

     registry.setHelper(new TestViewRegistryHelper(viewConfigs, files, outputStreams, jarFiles));

-    Set<ViewInstanceEntity> instanceEntities = registry.readViewArchives(configuration);
+    registry.readViewArchives();
+
+    ViewEntity view = null;
+
+    // Wait for the view load to complete.
+    long timeout = System.currentTimeMillis() + 10000L;
+    while ((view == null || !view.getStatus().equals(ViewDefinition.ViewStatus.LOADED))&&
+        System.currentTimeMillis() < timeout) {
+      view = registry.getDefinition("MY_VIEW", "1.0.0");
+    }
+
+    Assert.assertNotNull(view);
+    Assert.assertEquals(ViewDefinition.ViewStatus.LOADED, view.getStatus());
 
 
-    Assert.assertEquals(2, instanceEntities.size());
+    Assert.assertEquals(2, registry.getInstanceDefinitions(view).size());
 
 
     // verify mocks
     // verify mocks
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
         libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, resourceDAO, viewDAO, viewInstanceDAO);
         libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, resourceDAO, viewDAO, viewInstanceDAO);
   }
   }
 
 
+  @Ignore
   @Test
   @Test
   public void testReadViewArchives_exception() throws Exception {
   public void testReadViewArchives_exception() throws Exception {
     ViewRegistry registry = getRegistry();
     ViewRegistry registry = getRegistry();
 
 
-    Configuration configuration = createNiceMock(Configuration.class);
     File viewDir = createNiceMock(File.class);
     File viewDir = createNiceMock(File.class);
     File extractedArchiveDir = createNiceMock(File.class);
     File extractedArchiveDir = createNiceMock(File.class);
     File viewArchive = createNiceMock(File.class);
     File viewArchive = createNiceMock(File.class);
@@ -388,9 +405,19 @@ public class ViewRegistryTest {
 
 
     registry.setHelper(new TestViewRegistryHelper(viewConfigs, files, outputStreams, jarFiles));
     registry.setHelper(new TestViewRegistryHelper(viewConfigs, files, outputStreams, jarFiles));
 
 
-    Set<ViewInstanceEntity> instanceEntities = registry.readViewArchives(configuration);
+    registry.readViewArchives();
 
 
-    Assert.assertEquals(0, instanceEntities.size());
+    ViewEntity view = null;
+
+    // Wait for the view load to complete.
+    long timeout = System.currentTimeMillis() + 10000L;
+    while ((view == null || !view.getStatus().equals(ViewDefinition.ViewStatus.ERROR))&&
+        System.currentTimeMillis() < timeout) {
+      view = registry.getDefinition("MY_VIEW", "1.0.0");
+    }
+
+    Assert.assertNotNull(view);
+    Assert.assertEquals(ViewDefinition.ViewStatus.ERROR, view.getStatus());
 
 
     // verify mocks
     // verify mocks
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
@@ -885,10 +912,10 @@ public class ViewRegistryTest {
   private static ViewRegistry getRegistry() {
   private static ViewRegistry getRegistry() {
     ViewRegistry instance = getRegistry(viewDAO, viewInstanceDAO,
     ViewRegistry instance = getRegistry(viewDAO, viewInstanceDAO,
         userDAO, memberDAO, privilegeDAO,
         userDAO, memberDAO, privilegeDAO,
-        resourceDAO, resourceTypeDAO, securityHelper);
+        resourceDAO, resourceTypeDAO, securityHelper, handlerList);
 
 
     reset(viewDAO, resourceDAO, viewInstanceDAO, userDAO, memberDAO,
     reset(viewDAO, resourceDAO, viewInstanceDAO, userDAO, memberDAO,
-        privilegeDAO, resourceTypeDAO, securityHelper, configuration);
+        privilegeDAO, resourceTypeDAO, securityHelper, configuration, handlerList);
 
 
     return instance;
     return instance;
   }
   }
@@ -896,7 +923,8 @@ public class ViewRegistryTest {
   public static ViewRegistry getRegistry(ViewDAO viewDAO, ViewInstanceDAO viewInstanceDAO,
   public static ViewRegistry getRegistry(ViewDAO viewDAO, ViewInstanceDAO viewInstanceDAO,
                                   UserDAO userDAO, MemberDAO memberDAO,
                                   UserDAO userDAO, MemberDAO memberDAO,
                                   PrivilegeDAO privilegeDAO, ResourceDAO resourceDAO,
                                   PrivilegeDAO privilegeDAO, ResourceDAO resourceDAO,
-                                  ResourceTypeDAO resourceTypeDAO, SecurityHelper securityHelper ) {
+                                  ResourceTypeDAO resourceTypeDAO, SecurityHelper securityHelper,
+                                  ViewInstanceHandlerList handlerList) {
 
 
     ViewRegistry instance = new ViewRegistry();
     ViewRegistry instance = new ViewRegistry();
 
 
@@ -918,7 +946,11 @@ public class ViewRegistryTest {
                                      ClassLoader cl, String archivePath) throws Exception{
                                      ClassLoader cl, String archivePath) throws Exception{
     ViewRegistry registry = getRegistry();
     ViewRegistry registry = getRegistry();
 
 
-    return registry.createViewDefinition(viewConfig, ambariConfig, cl, archivePath);
+    ViewEntity viewDefinition = new ViewEntity(viewConfig, ambariConfig, archivePath);
+
+    registry.setupViewDefinition(viewDefinition, viewConfig, cl);
+
+    return viewDefinition;
   }
   }
 
 
   public static ViewInstanceEntity getViewInstanceEntity(ViewEntity viewDefinition, InstanceConfig instanceConfig) throws Exception {
   public static ViewInstanceEntity getViewInstanceEntity(ViewEntity viewDefinition, InstanceConfig instanceConfig) throws Exception {
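
Note on the readViewArchives() change: the call no longer returns the parsed instances; extraction now runs on the configured view-extraction thread pool, so both tests poll registry.getDefinition(...) until the view reaches the expected status or a deadline passes. The helper below is a generic restatement of that wait loop (the Await class and its names are illustrative, not Ambari API); it also sleeps briefly between polls instead of spinning hot the way the inlined loops do:

    import java.util.function.Predicate;
    import java.util.function.Supplier;

    public final class Await {

      private Await() {}

      // Polls until the supplier yields a value accepted by the predicate,
      // or the deadline passes; returns the last value seen (possibly null).
      public static <T> T until(Supplier<T> poll, Predicate<T> done, long timeoutMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        T value = poll.get();
        while ((value == null || !done.test(value)) && System.currentTimeMillis() < deadline) {
          Thread.sleep(10); // brief back-off instead of a busy spin
          value = poll.get();
        }
        return value;
      }
    }

With such a helper, each test's loop collapses to Await.until(() -> registry.getDefinition("MY_VIEW", "1.0.0"), v -> v.getStatus() == ViewDefinition.ViewStatus.LOADED, 10000L) followed by the assertions.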

+ 36 - 0
ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py

@@ -33,6 +33,11 @@ class TestHiveClient(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
       owner = 'hive',
       group = 'hadoop',
@@ -41,6 +46,14 @@ class TestHiveClient(RMFTestCase):
       configurations = self.getConfig()['configurations']['hive-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 0600,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
+    )
     self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
       not_if = '[ -f DBConnectionVerification.jar]',
       environment = {'no_proxy': 'c6401.ambari.apache.org'}
@@ -50,6 +63,11 @@ class TestHiveClient(RMFTestCase):
       owner = 'hive',
       group = 'hadoop',
     )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+      owner = 'hive',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
       owner = 'hive',
       group = 'hadoop',
@@ -87,6 +105,11 @@ class TestHiveClient(RMFTestCase):
       group = 'hadoop',
       recursive = True,
     )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
       owner = 'hive',
       group = 'hadoop',
@@ -95,6 +118,14 @@ class TestHiveClient(RMFTestCase):
       configurations = self.getConfig()['configurations']['hive-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 0600,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
+    )
     self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
       not_if = '[ -f DBConnectionVerification.jar]',
       environment = {'no_proxy': 'c6401.ambari.apache.org'}
@@ -104,6 +135,11 @@ class TestHiveClient(RMFTestCase):
       owner = 'hive',
       group = 'hadoop',
     )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+      owner = 'hive',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
       owner = 'hive',
       group = 'hadoop',

+ 36 - 0
ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py

@@ -164,11 +164,24 @@ class TestHiveMetastore(RMFTestCase):
       path = ['/bin', '/usr/bin/'],
       not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
     )
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
     self.assertResourceCalled('Directory', '/etc/hive/conf.server',
       owner = 'hive',
       group = 'hadoop',
       recursive = True,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 0644,
+      conf_dir = '/etc/hive/conf',
+      configurations = self.getConfig()['configurations']['hive-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
+    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
       owner = 'hive',
       group = 'hadoop',
@@ -203,6 +216,11 @@ class TestHiveMetastore(RMFTestCase):
       mode = 0755,
       recursive = True,
     )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
+      content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+      owner = 'hive',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
       content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
       owner = 'hive',
@@ -223,11 +241,24 @@ class TestHiveMetastore(RMFTestCase):
       path = ['/bin', '/usr/bin/'],
       not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
     )
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
     self.assertResourceCalled('Directory', '/etc/hive/conf.server',
       owner = 'hive',
       group = 'hadoop',
       recursive = True,
     )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 0644,
+      conf_dir = '/etc/hive/conf',
+      configurations = self.getConfig()['configurations']['hive-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hive-site']
+    )
     self.assertResourceCalled('XmlConfig', 'hive-site.xml',
       owner = 'hive',
       group = 'hadoop',
@@ -262,6 +293,11 @@ class TestHiveMetastore(RMFTestCase):
       mode = 0755,
       recursive = True,
     )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
+      content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
+      owner = 'hive',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
       content = InlineTemplate(self.getConfig()['configurations']['hive-env']['content']),
       owner = 'hive',
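
Note on the Hive config layout these tests pin down: hive-site.xml is now materialized twice, a world-readable copy (mode 0644) in /etc/hive/conf for client processes and an owner-only copy (mode 0600) in /etc/hive/conf.server for the metastore and server daemons, whose copy typically carries database credentials. A minimal java.nio.file sketch of that split (the paths and the helper are hypothetical, and a POSIX filesystem is assumed):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.PosixFilePermissions;

    public class HiveConfLayoutSketch {

      // Writes hive-site.xml into dir and applies the given POSIX mode string.
      static void writeConfig(Path dir, String perms, String xml) throws IOException {
        Files.createDirectories(dir);
        Path site = dir.resolve("hive-site.xml");
        Files.writeString(site, xml);
        Files.setPosixFilePermissions(site, PosixFilePermissions.fromString(perms));
      }

      public static void main(String[] args) throws IOException {
        String xml = "<configuration/>"; // placeholder config body
        // Client-readable copy (0644) and daemon-only copy (0600).
        writeConfig(Path.of("/tmp/hive-demo/conf"), "rw-r--r--", xml);
        writeConfig(Path.of("/tmp/hive-demo/conf.server"), "rw-------", xml);
      }
    }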

Some files were not shown because too many files changed in this diff