Merge branch 'trunk' into branch-alerts-dev

Conflicts:
	ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
	ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
Jonathan Hurley, 10 years ago
commit a533c1b4f3
100 changed files with 1360 additions and 1057 deletions
  1. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/CreateViewInstanceCtrl.js (+2 -1)
  2. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/EditViewInstanceCtrl.js (+0 -154)
  3. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsEditCtrl.js (+46 -28)
  4. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js (+0 -1)
  5. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersListCtrl.js (+10 -1)
  6. ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersShowCtrl.js (+7 -2)
  7. ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/User.js (+1 -0)
  8. ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/View.js (+4 -3)
  9. ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css (+24 -0)
  10. ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/create.html (+9 -0)
  11. ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/edit.html (+31 -10)
  12. ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html (+4 -4)
  13. ambari-admin/src/main/resources/ui/admin-web/app/views/groups/list.html (+1 -2)
  14. ambari-admin/src/main/resources/ui/admin-web/app/views/users/list.html (+9 -0)
  15. ambari-admin/src/main/resources/ui/admin-web/app/views/users/modals/changePassword.html (+4 -4)
  16. ambari-admin/src/main/resources/ui/admin-web/app/views/users/show.html (+8 -3)
  17. ambari-admin/src/main/resources/view.xml (+1 -0)
  18. ambari-agent/src/main/python/ambari_agent/AmbariConfig.py (+7 -2)
  19. ambari-agent/src/main/python/ambari_agent/Controller.py (+26 -29)
  20. ambari-agent/src/main/python/ambari_agent/DataCleaner.py (+3 -3)
  21. ambari-agent/src/test/python/ambari_agent/TestActionQueue.py (+2 -7)
  22. ambari-agent/src/test/python/ambari_agent/TestController.py (+64 -12)
  23. ambari-agent/src/test/python/ambari_agent/TestDataCleaner.py (+15 -0)
  24. ambari-common/src/main/python/ambari_commons/firewall.py (+4 -11)
  25. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java (+6 -10)
  26. ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceConfigVersionResponse.java (+30 -0)
  27. ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java (+1 -1)
  28. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java (+15 -0)
  29. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceConfigVersionResourceProvider.java (+10 -1)
  30. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewInstanceResourceProvider.java (+1 -14)
  31. ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java (+0 -1)
  32. ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java (+14 -0)
  33. ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ViewDAO.java (+18 -0)
  34. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java (+28 -0)
  35. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java (+12 -1)
  36. ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java (+5 -0)
  37. ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java (+130 -29)
  38. ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java (+23 -27)
  39. ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java (+1 -1)
  40. ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java (+3 -5)
  41. ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java (+2 -10)
  42. ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java (+13 -23)
  43. ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java (+71 -0)
  44. ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java (+80 -224)
  45. ambari-server/src/main/python/ambari-server.py (+10 -5)
  46. ambari-server/src/main/python/ambari_server/utils.py (+45 -14)
  47. ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql (+3 -1)
  48. ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql (+3 -1)
  49. ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql (+4 -1)
  50. ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql (+7 -3)
  51. ambari-server/src/main/resources/properties.json (+3 -0)
  52. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py (+3 -1)
  53. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py (+7 -0)
  54. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py (+1 -4)
  55. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py (+2 -3)
  56. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml (+22 -0)
  57. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-properties.xml (+31 -22)
  58. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig.properties (+0 -52)
  59. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml (+7 -1)
  60. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py (+2 -0)
  61. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py (+8 -16)
  62. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/configuration/sqoop-env.xml (+5 -0)
  63. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py (+1 -1)
  64. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py (+2 -0)
  65. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py (+8 -1)
  66. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py (+2 -2)
  67. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py (+12 -6)
  68. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py (+1 -4)
  69. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py (+2 -3)
  70. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml (+22 -3)
  71. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml (+1 -1)
  72. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py (+3 -13)
  73. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py (+0 -12)
  74. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml (+5 -1)
  75. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py (+1 -1)
  76. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py (+1 -1)
  77. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py (+13 -7)
  78. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/metainfo.xml (+22 -3)
  79. ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java (+7 -0)
  80. ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java (+7 -0)
  81. ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java (+7 -0)
  82. ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java (+7 -0)
  83. ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProviderTest.java (+14 -6)
  84. ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProviderTest.java (+14 -7)
  85. ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java (+40 -13)
  86. ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java (+95 -147)
  87. ambari-server/src/test/python/TestAmbariServer.py (+73 -13)
  88. ambari-server/src/test/python/TestOSCheck.py (+5 -3)
  89. ambari-server/src/test/python/TestUtils.py (+38 -8)
  90. ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py (+4 -9)
  91. ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py (+4 -9)
  92. ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py (+4 -9)
  93. ambari-server/src/test/python/stacks/1.3.2/PIG/test_pig_client.py (+10 -4)
  94. ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json (+2 -1)
  95. ambari-server/src/test/python/stacks/1.3.2/configs/default.json (+5 -1)
  96. ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json (+2 -1)
  97. ambari-server/src/test/python/stacks/1.3.2/configs/secured.json (+5 -1)
  98. ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py (+5 -0)
  99. ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py (+4 -9)
  100. ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py (+4 -9)

+ 2 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/CreateViewInstanceCtrl.js

@@ -34,7 +34,8 @@ angular.module('ambariAdminConsole')
         visible: true,
         icon_path: '',
         icon64_path: '',
-        properties: viewVersion.ViewVersionInfo.parameters
+        properties: viewVersion.ViewVersionInfo.parameters,
+        description: ''
       };
     });
   }

+ 0 - 154
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/EditViewInstanceCtrl.js

@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-'use strict';
-
-angular.module('ambariAdminConsole')
-.controller('EditViewInstanceCtrl', ['$scope', 'View', 'uiAlert', 'PermissionLoader', 'PermissionSaver', 'instance', '$modalInstance', '$modal', function($scope, View, uiAlert, PermissionLoader, PermissionSaver, instance, $modalInstance, $modal) {
-
-  $scope.instance = instance;
-  $scope.settings = {
-    'visible': $scope.instance.ViewInstanceInfo.visible,
-    'label': $scope.instance.ViewInstanceInfo.label
-  };
-  $scope.configuration = angular.copy($scope.instance.ViewInstanceInfo.properties);
-  
-  function reloadViewPrivilegies(){
-    PermissionLoader.getViewPermissions({
-      viewName: $scope.instance.ViewInstanceInfo.view_name,
-      version: $scope.instance.ViewInstanceInfo.version,
-      instanceId: $scope.instance.ViewInstanceInfo.instance_name
-    })
-    .then(function(permissions) {
-      // Refresh data for rendering
-      $scope.permissionsEdit = permissions;
-      $scope.permissions = angular.copy(permissions);
-    })
-    .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
-  }
-
-  $scope.permissions = [];
-  
-  reloadViewPrivilegies();
-
-  $scope.edit = {};
-  $scope.edit.editSettingsDisabled = true;
-  
-
-  $scope.saveSettings = function() {
-    View.updateInstance($scope.instance.ViewInstanceInfo.view_name, $scope.instance.ViewInstanceInfo.version, $scope.instance.ViewInstanceInfo.instance_name, {
-      'ViewInstanceInfo':{
-        'visible': $scope.settings.visible,
-        'label': $scope.settings.label
-      }
-    })
-    .success(function() {
-      $scope.edit.editSettingsDisabled = true;
-    })
-    .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
-  };
-
-  $scope.cancelSettings = function() {
-    $scope.settings = {
-      'visible': $scope.instance.ViewInstanceInfo.visible,
-      'label': $scope.instance.ViewInstanceInfo.label
-    };
-    $scope.edit.editSettingsDisabled = true;
-  };
-
-  $scope.edit.editConfigurationDisabled = true;
-
-  $scope.saveConfiguration = function() {
-    View.updateInstance($scope.instance.ViewInstanceInfo.view_name, $scope.instance.ViewInstanceInfo.version, $scope.instance.ViewInstanceInfo.instance_name, {
-      'ViewInstanceInfo':{
-        'properties': $scope.configuration
-      }
-    })
-    .success(function() {
-      $scope.edit.editConfigurationDisabled = true;
-    })
-    .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
-  };
-  $scope.cancelConfiguration = function() {
-    $scope.configuration = angular.copy($scope.instance.ViewInstanceInfo.properties);
-    $scope.edit.editConfigurationDisabled = true;
-  };
-
-  // Permissions edit
-  $scope.edit.editPermissionDisabled = true;
-  $scope.cancelPermissions = function() {
-    $scope.permissionsEdit = angular.copy($scope.permissions); // Reset textedit areaes
-    $scope.edit.editPermissionDisabled = true;
-  };
-
-  $scope.savePermissions = function() {
-    PermissionSaver.saveViewPermissions(
-      $scope.permissions,
-      $scope.permissionsEdit,
-      {
-        view_name: $scope.instance.ViewInstanceInfo.view_name,
-        version: $scope.instance.ViewInstanceInfo.version,
-        instance_name: $scope.instance.ViewInstanceInfo.instance_name
-      }
-    )
-    .then(reloadViewPrivilegies)
-    .catch(function(data) {
-      reloadViewPrivilegies();
-      uiAlert.danger(data.data.status, data.data.message);
-    });
-    $scope.edit.editPermissionDisabled = true;
-  };
-
-  $scope.removePermission = function(permissionName, principalType, principalName) {
-    var modalInstance = $modal.open({
-      templateUrl: 'views/ambariViews/modals/create.html',
-      size: 'lg',
-      controller: 'CreateViewInstanceCtrl',
-      resolve: {
-        viewVersion: function(){
-          return '';
-        }
-      }
-    });
-
-
-
-    View.deletePrivilege({
-      view_name: $scope.instance.ViewInstanceInfo.view_name,
-      version: $scope.instance.ViewInstanceInfo.version,
-      instance_name: $scope.instance.ViewInstanceInfo.instance_name,
-      permissionName: permissionName,
-      principalType: principalType,
-      principalName: principalName
-    })
-    .then(reloadViewPrivilegies)
-    .catch(function(data) {
-      reloadViewPrivilegies();
-      uiAlert.danger(data.data.status, data.data.message);
-    });
-  };
-
-  $scope.close = function() {
-    $modalInstance.close();
-  };
-}]);

+ 46 - 28
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsEditCtrl.js

@@ -25,12 +25,19 @@ angular.module('ambariAdminConsole')
     View.getInstance($routeParams.viewId, $routeParams.version, $routeParams.instanceId)
     .then(function(instance) {
       $scope.instance = instance;
+
       $scope.settings = {
         'visible': $scope.instance.ViewInstanceInfo.visible,
-        'label': $scope.instance.ViewInstanceInfo.label
+        'label': $scope.instance.ViewInstanceInfo.label,
+        'description': $scope.instance.ViewInstanceInfo.description
       };
 
       $scope.configuration = angular.copy($scope.instance.ViewInstanceInfo.properties);
+      for(var confName in $scope.configuration){
+        if( $scope.configuration.hasOwnProperty(confName) ){
+          $scope.configuration[confName] = $scope.configuration[confName] === 'null' ? '' : $scope.configuration[confName];
+        }
+      }
       $scope.isConfigurationEmpty = angular.equals({}, $scope.configuration);
     })
     .catch(function(data) {
@@ -72,45 +79,56 @@ angular.module('ambariAdminConsole')
   reloadViewPrivilegies();
 
   $scope.editSettingsDisabled = true;
-
+  $scope.toggleSettingsEdit = function() {
+    $scope.editSettingsDisabled = !$scope.editSettingsDisabled;
+  };
 
   $scope.saveSettings = function() {
-    View.updateInstance($routeParams.viewId, $routeParams.version, $routeParams.instanceId, {
-      'ViewInstanceInfo':{
-        'visible': $scope.settings.visible,
-        'label': $scope.settings.label
-      }
-    })
-    .success(function() {
-      reloadViewInfo();
-      $scope.editSettingsDisabled = true;
-    })
-    .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
+    if( $scope.settingsForm.$valid ){
+      View.updateInstance($routeParams.viewId, $routeParams.version, $routeParams.instanceId, {
+        'ViewInstanceInfo':{
+          'visible': $scope.settings.visible,
+          'label': $scope.settings.label,
+          'description': $scope.settings.description
+        }
+      })
+      .success(function() {
+        reloadViewInfo();
+        $scope.editSettingsDisabled = true;
+      })
+      .catch(function(data) {
+        uiAlert.danger(data.data.status, data.data.message);
+      });
+    }
   };
   $scope.cancelSettings = function() {
     $scope.settings = {
       'visible': $scope.instance.ViewInstanceInfo.visible,
-      'label': $scope.instance.ViewInstanceInfo.label
+      'label': $scope.instance.ViewInstanceInfo.label,
+      'description': $scope.instance.ViewInstanceInfo.description
     };
     $scope.editSettingsDisabled = true;
   };
 
+
   $scope.editConfigurationDisabled = true;
-
+  $scope.togglePropertiesEditing = function() {
+     $scope.editConfigurationDisabled = !$scope.editConfigurationDisabled;
+  }
   $scope.saveConfiguration = function() {
-    View.updateInstance($routeParams.viewId, $routeParams.version, $routeParams.instanceId, {
-      'ViewInstanceInfo':{
-        'properties': $scope.configuration
-      }
-    })
-    .success(function() {
-      $scope.editConfigurationDisabled = true;
-    })
-    .catch(function(data) {
-      uiAlert.danger(data.data.status, data.data.message);
-    });
+    if( $scope.propertiesForm.$valid ){
+      View.updateInstance($routeParams.viewId, $routeParams.version, $routeParams.instanceId, {
+        'ViewInstanceInfo':{
+          'properties': $scope.configuration
+        }
+      })
+      .success(function() {
+        $scope.editConfigurationDisabled = true;
+      })
+      .catch(function(data) {
+        uiAlert.danger(data.data.status, data.data.message);
+      });
+    }
   };
   $scope.cancelConfiguration = function() {
     $scope.configuration = angular.copy($scope.instance.ViewInstanceInfo.properties);

+ 0 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js

@@ -73,7 +73,6 @@ angular.module('ambariAdminConsole')
         return !!view; // Remove 'undefined'
       });
     }
-
     $scope.filteredViews = result;
   };
 }]);

+ 10 - 1
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersListCtrl.js

@@ -39,7 +39,8 @@ angular.module('ambariAdminConsole')
       usersPerPage: $scope.usersPerPage,
       searchString: $scope.currentNameFilter,
       ldap_user: $scope.currentTypeFilter.value,
-      active: $scope.currentActiveFilter.value
+      active: $scope.currentActiveFilter.value,
+      admin: $scope.adminFilter
     }).then(function(data) {
       $scope.totalUsers = data.data.itemTotal;
       $scope.users = data.data.items;
@@ -66,6 +67,14 @@ angular.module('ambariAdminConsole')
   ];
   $scope.currentTypeFilter = $scope.typeFilterOptions[0];
 
+  $scope.adminFilter = false;
+  $scope.toggleAdminFilter = function() {
+    $scope.adminFilter = !$scope.adminFilter;
+    $scope.resetPagination();
+    $scope.loadUsers();
+  };
+
+
   $scope.loadUsers();
 
   $rootScope.$watch(function(scope) {

+ 7 - 2
ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/users/UsersShowCtrl.js

@@ -57,7 +57,6 @@ angular.module('ambariAdminConsole')
     $q.all(promises).then(function() {
       loadUserInfo();
     });
-    // $scope.user.user_groups = $scope.editingGroupsList.split(',');
     $scope.isGroupEditing = false;
   };
 
@@ -69,13 +68,19 @@ angular.module('ambariAdminConsole')
   $scope.openChangePwdDialog = function() {
     var modalInstance = $modal.open({
       templateUrl: 'views/users/modals/changePassword.html',
-      controller: ['$scope', function($scope) {
+      resolve: {
+        userName: function() {
+          return $scope.user.user_name;
+        }
+      },
+      controller: ['$scope', 'userName', function($scope, userName) {
         $scope.passwordData = {
           password: '',
           currentUserPassword: ''
         };
 
         $scope.form = {};
+        $scope.userName = userName;
 
         $scope.ok = function() {
           $scope.form.passwordChangeForm.submitted = true;

+ 1 - 0
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/User.js

@@ -43,6 +43,7 @@ angular.module('ambariAdminConsole')
         + '&page_size=' + params.usersPerPage
         + (params.ldap_user === '*' ? '' : '&Users/ldap_user=' + params.ldap_user)
         + (params.active === '*' ? '' : '&Users/active=' + params.active)
+        + (params.admin ? '&Users/admin=true' : '')
       );
     },
     get: function(userId) {
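
Note: the user-list request above now appends an admin-only predicate to the query string when params.admin is truthy, while '*' still means "no filter" for ldap_user and active. A minimal sketch of that filter-building logic (hypothetical helper name, written in Python 2 for consistency with the agent-side sketches below, not part of the actual commit):

    def build_user_filters(ldap_user, active, admin):
        # '*' disables the ldap_user/active filters; admin is opt-in only.
        parts = []
        if ldap_user != '*':
            parts.append('&Users/ldap_user=' + ldap_user)
        if active != '*':
            parts.append('&Users/active=' + active)
        if admin:
            parts.append('&Users/admin=true')
        return ''.join(parts)

    print build_user_filters('*', 'true', True)  # -> '&Users/active=true&Users/admin=true'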

+ 4 - 3
ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/View.js

@@ -72,7 +72,7 @@ angular.module('ambariAdminConsole')
       }
     }
 
-    self.isOpened = !self.instances.length;
+    // self.isOpened = !self.instances.length;
     self.versionsList = item.versions;
   }
 
@@ -177,7 +177,8 @@ angular.module('ambariAdminConsole')
           visible: instanceInfo.visible,
           icon_path: instanceInfo.icon_path,
           icon64_path: instanceInfo.icon64_path,
-          properties: properties
+          properties: properties,
+          description: instanceInfo.description
         }
       }
     })
@@ -239,7 +240,7 @@ angular.module('ambariAdminConsole')
     var fields = [
       'versions/ViewVersionInfo/version',
       'versions/instances/ViewInstanceInfo',
-      'versions/ViewVersionInfo'
+      'versions/*'
     ];
 
     $http({

+ 24 - 0
ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css

@@ -16,6 +16,17 @@
  * limitations under the License.
  */
 
+ .instances-table{
+  table-layout: fixed;
+ }
+ .description-column{
+  text-overflow: ellipsis;
+  overflow: hidden;
+  white-space: nowrap;
+  max-width: 100%;
+  display: inline-block;
+ }
+
 .paginator{
   margin: 0;
 }
@@ -106,6 +117,11 @@
   width: 14px;
 }
 
+.settings-edit-toggle.disabled, .properties-toggle.disabled{
+  color: #999;
+  cursor: not-allowed;
+}
+
 .pulldown2{
   -webkit-transform: translateY(2px);
   -ms-transform: translateY(2px);
@@ -268,6 +284,14 @@ ul.nav li > a{
   cursor: pointer;
 }
 
+.admin-filter{
+  cursor: pointer;
+}
+
+.glyphicon-flash.no-filter{
+  color: #999;
+}
+
 .top-buffer{
   padding-top: 20px;
 }

+ 9 - 0
ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/create.html

@@ -73,6 +73,15 @@
           </div>
         </div>
       </div>
+      <div class="form-group" ng-class="{'has-error' : form.isntanceCreateForm.description.$error.required && form.isntanceCreateForm.submitted }">
+        <label for="" class="control-label col-sm-2">Instance Description</label>
+        <div class="col-sm-10">
+          <input type="text" class="form-control" name="description" ng-model="instance.description" maxlength="140" required>
+          <div class="alert alert-danger no-margin-bottom top-margin" ng-show='form.isntanceCreateForm.description.$error.required && form.isntanceCreateForm.submitted'>
+            This field is required.
+          </div>
+        </div>
+      </div>
       <div class="form-group">
         <div class="col-sm-10 col-sm-offset-2">
           <div class="checkbox">

+ 31 - 10
ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/edit.html

@@ -28,12 +28,13 @@
 <div class="panel panel-default" ng-cloak ng-show="instance">
   <div class="panel-heading clearfix">
     <h3 class="panel-title pull-left">Settings</h3>
-    <div class="pull-right">
-      <a href ng-click="editSettingsDisabled = !editSettingsDisabled" ng-show="editSettingsDisabled" class="settings-edit-toggle"> <span class="glyphicon glyphicon-pencil"></span> Edit</a>
+    <div class="pull-right" ng-switch="instance.ViewInstanceInfo.static">
+      <a href ng-switch-when="false" ng-click="toggleSettingsEdit()" ng-show="editSettingsDisabled" class="settings-edit-toggle"> <span class="glyphicon glyphicon-pencil" ></span> Edit</a>
+      <a href ng-switch-when="true" class="settings-edit-toggle disabled" tooltip="You can`t edit XML driven instances"> <span class="glyphicon glyphicon-pencil" ></span> Edit</a>
     </div>
   </div>
   <div class="panel-body">
-    <form class="form-horizontal">
+    <form class="form-horizontal" name="settingsForm" novalidate>
       <div class="form-group">
         <label for="" class="col-sm-2 control-label">View Name</label>
         <div class="col-sm-10"><input disabled="disabled" type="text" class="form-control instancename-input" placeholder="Display Name" value="{{instance.ViewInstanceInfo.view_name}}"></div>
@@ -47,9 +48,23 @@
           <label for="" class="col-sm-2 control-label">Instance Name</label>
           <div class="col-sm-10"><input disabled="disabled" type="text" class="form-control instancename-input" placeholder="Display Name" value="{{instance.ViewInstanceInfo.instance_name}}"></div>
         </div>
-        <div class="form-group">
+        <div class="form-group" ng-class="{'has-error' : settingsForm.displayName.$error.required && !editSettingsDisabled}">
           <label for="" class="col-sm-2 control-label">Display Name</label>
-          <div class="col-sm-10"><input type="text" class="form-control instancename-input" placeholder="Display Name" ng-model="settings.label"></div>
+          <div class="col-sm-10">
+            <input type="text" class="form-control instancename-input" placeholder="Display Name" name="displayName" required ng-model="settings.label">
+            <div class="alert alert-danger no-margin-bottom top-margin" ng-show='settingsForm.displayName.$error.required  && !editSettingsDisabled'>
+              This field is required.
+            </div>
+          </div>
+        </div>
+        <div class="form-group" ng-class="{'has-error' : settingsForm.description.$error.required  && !editSettingsDisabled}">
+          <label for="" class="control-label col-sm-2">Instance Description</label>
+          <div class="col-sm-10">
+            <input type="text" class="form-control" ng-model="settings.description" name="description" placeholder="Instance Description" required>
+            <div class="alert alert-danger no-margin-bottom top-margin" ng-show='settingsForm.description.$error.required  && !editSettingsDisabled'>
+              This field is required.
+            </div>
+          </div>
         </div>
         <div class="form-group">
           <div class="col-sm-offset-2 col-sm-10">
@@ -123,16 +138,22 @@
 <div class="panel panel-default">
   <div class="panel-heading clearfix">
     <h3 class="panel-title pull-left">Properties</h3>
-    <div class="pull-right">
-      <a href ng-hide="isConfigurationEmpty" ng-click="editConfigurationDisabled = !editConfigurationDisabled" ng-show="editConfigurationDisabled" class="properties-toggle"> <span class="glyphicon glyphicon-pencil"></span> Edit</a>
+    <div class="pull-right" ng-switch="instance.ViewInstanceInfo.static">
+      <a href ng-switch-when="false" ng-hide="isConfigurationEmpty" ng-click="togglePropertiesEditing()" ng-show="editConfigurationDisabled" class="properties-toggle"> <span class="glyphicon glyphicon-pencil"></span> Edit</a>
+      <a href ng-switch-when="true" ng-hide="isConfigurationEmpty"  class="properties-toggle disabled"> <span class="glyphicon glyphicon-pencil"></span> Edit</a>
     </div>
   </div>
   <div class="panel-body">
-    <form action="" class="form-horizontal" ng-hide="isConfigurationEmpty">
+    <form name="propertiesForm" class="form-horizontal" ng-hide="isConfigurationEmpty" novalidate>
       <fieldset ng-disabled="editConfigurationDisabled">
-        <div class="form-group" ng-repeat="(propertyName, propertyValue) in configurationMeta">
+        <div class="form-group" ng-repeat="(propertyName, propertyValue) in configurationMeta" ng-class="{'has-error' : propertyValue.required && propertiesForm[propertyName].$error.required && !editConfigurationDisabled}">
           <label for="" class="control-label col-sm-3" ng-class="{'not-required': !propertyValue.required}" tooltip="{{propertyValue.description}}">{{propertyName}}{{propertyValue.required ? '*' : ''}}</label>
-          <div class="col-sm-9"><input type="{{propertyValue.masked ? 'password' : 'text'}}" class="form-control propertie-input" ng-model="configuration[propertyName]"></div>
+          <div class="col-sm-9">
+            <input type="{{propertyValue.masked ? 'password' : 'text'}}" class="form-control propertie-input" ng-required="propertyValue.required" name="{{propertyName}}" ng-model="configuration[propertyName]">
+            <div class="alert alert-danger no-margin-bottom top-margin" ng-show='propertyValue.required && propertiesForm[propertyName].$error.required && !editConfigurationDisabled'>
+              This field is required.
+            </div>
+          </div>
         </div>
         <div class="form-group" ng-hide="editConfigurationDisabled">
           <div class="col-sm-offset-2 col-sm-10">

+ 4 - 4
ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html

@@ -46,18 +46,18 @@
             {{view.view_name}}
           </div>
           <div class="col-sm-3">{{view.versions}}</div>
-          <div class="col-sm-6">This is a description</div>
+          <div class="col-sm-6">{{view.description}}</div>
         </div>
       </accordion-heading>
-      <table class="table">
+      <table class="table instances-table">
         <tbody>
           <tr ng-repeat="instance in view.instances">
             <td class="col-sm-3"></td>
             <td class="col-sm-3">
               <a href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/edit" class="instance-link">{{instance.ViewInstanceInfo.label}}</a>
             </td>
-            <td class="col-sm-3">{{instance.ViewInstanceInfo.version}}</td>
-            <td class="col-sm-3">
+            <td class="col-sm-1">{{instance.ViewInstanceInfo.version}}</td>
+            <td class="col-sm-5 " ><div class="description-column" tooltip="{{instance.ViewInstanceInfo.description}}">{{instance.ViewInstanceInfo.description || 'No description'}}</div>
             </td>
           </tr>
         </tbody>

+ 1 - 2
ambari-admin/src/main/resources/ui/admin-web/app/views/groups/list.html

@@ -33,9 +33,8 @@
         </th>
         <th>
           <label for="">Type</label>
-
         </th>
-        <th>Members</th>
+        <th><label for="">Members</label></th>
       </tr>
       <tr>
         <th class="col-sm-8">

+ 9 - 0
ambari-admin/src/main/resources/ui/admin-web/app/views/users/list.html

@@ -29,6 +29,13 @@
   <table class="table table-striped table-hover">
     <thead>
       <tr>
+        <th width="30">
+          <span class="bottom-margin admin-filter glyphicon glyphicon-flash" 
+            ng-class="{'no-filter' : !adminFilter}" 
+            ng-click="toggleAdminFilter()"
+            tooltip="{{adminFilter ? 'Show all users' : 'Show only admin users'}}"
+          ></span>
+        </th>
         <th>
           <div class="search-container">
             <label for="">Username</label>
@@ -59,6 +66,8 @@
       <tr ng-repeat="user in users">
         <td>
           <span class="glyphicon" tooltip="{{user.Users.admin ? 'Ambari Admin' : ''}}" ng-class="{'glyphicon-flash' : user.Users.admin}"></span>
+        </td>
+        <td>
           <link-to route="users.show" id="{{user.Users.user_name}}">{{user.Users.user_name}}</link-to>
         </td>
         <td>{{user.Users.ldap_user ? 'LDAP' : 'Local'}}</td>

+ 4 - 4
ambari-admin/src/main/resources/ui/admin-web/app/views/users/modals/changePassword.html

@@ -16,7 +16,7 @@
 * limitations under the License.
 -->
 <div class="modal-header">
-  <h3 class="modal-title">Change Password</h3>
+  <h3 class="modal-title">Change Password for {{userName}}</h3>
 </div>
 <div class="modal-body">
   <form class="form-horizontal" novalidate name="form.passwordChangeForm" role="form" >
@@ -30,10 +30,10 @@
       </div>
     </div>
     <div class="form-group no-margin-bottom" ng-class="{'has-error' : (form.passwordChangeForm.password.$error.required && form.passwordChangeForm.submitted) || form.passwordChangeForm.confirmPassword.$error.passwordVerify}">
-      <label for="" class="col-sm-4 control-label">New Password:</label>
+      <label for="" class="col-sm-4 control-label">New User Password:</label>
       <div class="col-sm-8">
-        <input type="password" class="form-control bottom-margin" name="password" placeholder="Password" required ng-model="passwordData.password" autocomplete="off">
-        <input type="password" class="form-control bottom-margin" name="confirmPassword" placeholder="Password confirmation" required ng-model="passwordData.passwordConfirmation"
+        <input type="password" class="form-control bottom-margin" name="password" placeholder="New User Password" required ng-model="passwordData.password" autocomplete="off">
+        <input type="password" class="form-control bottom-margin" name="confirmPassword" placeholder="New User Password Confirmation" required ng-model="passwordData.passwordConfirmation"
           password-verify="passwordData.password" autocomplete="off">
         <div class="alert alert-danger no-margin-bottom" ng-show='form.passwordChangeForm.confirmPassword.$error.passwordVerify'>
           Password must match!

+ 8 - 3
ambari-admin/src/main/resources/ui/admin-web/app/views/users/show.html

@@ -52,7 +52,11 @@
     <div class="form-group">
       <label for="password" class="col-sm-2 control-label">Password</label>
       <div class="col-sm-10">
-        <a href ng-click="openChangePwdDialog()" ng-disabled="user.ldap_user" class="btn btn-default changepassword">Change Password</a>
+        <div ng-switch="user.ldap_user">
+          <button class="btn deleteuser-btn disabled btn-default" ng-switch-when="true" tooltip="Cannot Change Password">Change Password</button>
+          <a href ng-click="openChangePwdDialog()" ng-switch-when="false" class="btn btn-default changepassword">Change Password</a>
+        </div>
       </div>
     </div>
     <div class="form-group">
@@ -82,7 +86,7 @@
       </div>
 
     </div>
-    <div class="form-group">
+    <div class="form-group" >
       <label for="" class="col-sm-2 control-label">Privileges</label>
       <div class="col-sm-10">
         <table class="table">
@@ -92,7 +96,7 @@
               <th>Permissions</th>
             </tr>
           </thead>
-          <tbody>
+          <tbody ng-hide="user.admin">
             <tr ng-repeat="(name, privilege) in privileges.clusters">
               <td>
                 <span class="glyphicon glyphicon-cloud"></span>
@@ -114,6 +118,7 @@
           </tbody>
         </table>
         <div class="alert alert-info" ng-show="!privileges">This user does not have any privileges.</div>
+        <div class="alert alert-info" ng-show="user.admin">This user is an Ambari Admin and has all privileges.</div>
       </div>
     </div>
   </form>

+ 1 - 0
ambari-admin/src/main/resources/view.xml

@@ -20,5 +20,6 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
   <version>1.0.0</version>
   <instance>
     <name>INSTANCE</name>
+    <visible>false</visible>
   </instance>
 </view>

+ 7 - 2
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -237,8 +237,13 @@ class AmbariConfig:
     self.net = NetUtil()
     self.config.readfp(StringIO.StringIO(content))
 
-  def get(self, section, value):
-    return self.config.get(section, value)
+  def get(self, section, value, default=None):
+    try:
+      return self.config.get(section, value)
+    except ConfigParser.Error, err:
+      if default:
+        return default
+      raise err
 
   def set(self, section, option, value):
     self.config.set(section, option, value)
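
Note: with this change AmbariConfig.get() falls back to a caller-supplied default instead of raising when a key is missing. A minimal usage sketch (hypothetical values, Python 2 to match the agent code; observe that because the guard is "if default:", a falsy default such as 0 or '' would still re-raise):

    import ConfigParser
    import StringIO

    content = "[agent]\nloglevel = INFO\n"
    config = ConfigParser.ConfigParser()
    config.readfp(StringIO.StringIO(content))

    def get(section, option, default=None):
        # Mirrors the new AmbariConfig.get(section, value, default) above
        try:
            return config.get(section, option)
        except ConfigParser.Error, err:
            if default:
                return default
            raise err

    print get('agent', 'loglevel')                     # -> 'INFO'
    print get('agent', 'data_cleanup_max_age', 86400)  # -> 86400 (missing key)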

+ 26 - 29
ambari-agent/src/main/python/ambari_agent/Controller.py

@@ -86,17 +86,20 @@ class Controller(threading.Thread):
   def __del__(self):
     logger.info("Server connection disconnected.")
     pass
-
+
   def registerWithServer(self):
+    """
+    :return: returning from current method without setting self.isRegistered
+    to True will lead to agent termination.
+    """
     LiveStatus.SERVICES = []
     LiveStatus.CLIENT_COMPONENTS = []
     LiveStatus.COMPONENTS = []
-    id = -1
     ret = {}
 
     while not self.isRegistered:
       try:
-        data = json.dumps(self.register.build(id))
+        data = json.dumps(self.register.build())
         prettyData = pprint.pformat(data)
 
         try:
@@ -119,8 +122,7 @@ class Controller(threading.Thread):
           # log - message, which will be printed to agents log
           if 'log' in ret.keys():
             log = ret['log']
-
-          logger.error(log)
+            logger.error(log)
           self.isRegistered = False
           self.repeatRegistration = False
           return ret
@@ -130,7 +132,7 @@ class Controller(threading.Thread):
         self.responseId = int(ret['responseId'])
         self.isRegistered = True
         if 'statusCommands' in ret.keys():
-          logger.info("Got status commands on registration " + pprint.pformat(ret['statusCommands']) )
+          logger.info("Got status commands on registration " + pprint.pformat(ret['statusCommands']))
           self.addToStatusQueue(ret['statusCommands'])
           pass
         else:
@@ -143,16 +145,15 @@ class Controller(threading.Thread):
 
         pass
       except ssl.SSLError:
-        self.repeatRegistration=False
+        self.repeatRegistration = False
         self.isRegistered = False
         return
       except Exception:
         # try a reconnect only after a certain amount of random time
         delay = randint(0, self.range)
-        logger.error("Unable to connect to: " + self.registerUrl, exc_info = True)
+        logger.error("Unable to connect to: " + self.registerUrl, exc_info=True)
         """ Sleeping for {0} seconds and then retrying again """.format(delay)
         time.sleep(delay)
-        pass
       pass
     return ret
 
@@ -161,7 +162,7 @@ class Controller(threading.Thread):
     if commands:
       self.actionQueue.cancel(commands)
     pass
-
+
   def addToQueue(self, commands):
     """Add to the queue for running the commands """
     """ Put the required actions into the Queue """
@@ -192,11 +193,8 @@ class Controller(threading.Thread):
     self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
     retry = False
     certVerifFailed = False
-
     hb_interval = self.config.get('heartbeat', 'state_interval')
 
-    #TODO make sure the response id is monotonically increasing
-    id = 0
     while not self.DEBUG_STOP_HEARTBEATING:
       try:
         if not retry:
@@ -226,7 +224,7 @@ class Controller(threading.Thread):
           logger.info('Heartbeat response received (id = %s)', serverId)
 
         if 'hasMappedComponents' in response.keys():
-          self.hasMappedComponents = response['hasMappedComponents'] != False
+          self.hasMappedComponents = response['hasMappedComponents'] is not False
 
         if 'registrationCommand' in response.keys():
           # check if the registration command is None. If none skip
@@ -240,7 +238,7 @@ class Controller(threading.Thread):
           logger.error("Error in responseId sequence - restarting")
           self.restartAgent()
         else:
-          self.responseId=serverId
+          self.responseId = serverId
 
         if 'cancelCommands' in response.keys():
           self.cancelCommandInQueue(response['cancelCommands'])
@@ -268,7 +266,7 @@ class Controller(threading.Thread):
         if retry:
           logger.info("Reconnected to %s", self.heartbeatUrl)
 
-        retry=False
+        retry = False
         certVerifFailed = False
         self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
         self.DEBUG_HEARTBEAT_RETRIES = 0
@@ -278,10 +276,6 @@ class Controller(threading.Thread):
         self.isRegistered = False
         return
       except Exception, err:
-        #randomize the heartbeat
-        delay = randint(0, self.range)
-        time.sleep(delay)
-
         if "code" in err:
           logger.error(err.code)
         else:
@@ -301,13 +295,17 @@ class Controller(threading.Thread):
             logger.warn("Server certificate verify failed. Did you regenerate server certificate?")
             certVerifFailed = True
 
-        self.cachedconnect = None # Previous connection is broken now
-        retry=True
+        self.cachedconnect = None  # Previous connection is broken now
+        retry = True
+
+        #randomize the heartbeat
+        delay = randint(0, self.range)
+        time.sleep(delay)
 
       # Sleep for some time
       timeout = self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC \
                 - self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS
-      self.heartbeat_wait_event.wait(timeout = timeout)
+      self.heartbeat_wait_event.wait(timeout=timeout)
       # Sleep a bit more to allow STATUS_COMMAND results to be collected
       # and sent in one heartbeat. Also avoid server overload with heartbeats
       time.sleep(self.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
@@ -365,17 +363,16 @@ class Controller(threading.Thread):
       return json.loads(response)
     except Exception, exception:
       if response is None:
-        err_msg = 'Request to {0} failed due to {1}'.format(url, str(exception))
-        return {'exitstatus': 1, 'log': err_msg}
+        raise IOError('Request to {0} failed due to {1}'.format(url, str(exception)))
       else:
-        err_msg = ('Response parsing failed! Request data: ' + str(data)
-            + '; Response: ' + str(response))
-        logger.warn(err_msg)
-        return {'exitstatus': 1, 'log': err_msg}
+        raise IOError('Response parsing failed! Request data: ' + str(data)
+                      + '; Response: ' + str(response))
+
 
   def updateComponents(self, cluster_name):
     logger.info("Updating components map of cluster " + cluster_name)
 
+    # May throw IOError on server connection error
     response = self.sendRequest(self.componentsUrl + cluster_name, None)
     logger.debug("Response from %s was %s", self.serverHostname, str(response))
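
Note: sendRequest now raises IOError on connection or parse failures instead of returning an {'exitstatus': 1, 'log': ...} dict, so callers such as updateComponents must let the error propagate or trap it. A minimal caller sketch (hypothetical helper name, Python 2):

    import logging
    logger = logging.getLogger(__name__)

    def update_components_safely(controller, cluster_name):
        # sendRequest raises IOError on a server connection error or an
        # unparseable response; trap the exception rather than inspect a dict.
        try:
            return controller.sendRequest(controller.componentsUrl + cluster_name, None)
        except IOError, err:
            logger.error("Components update for %s failed: %s", cluster_name, err)
            return None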
 
 

+ 3 - 3
ambari-agent/src/main/python/ambari_agent/DataCleaner.py

@@ -36,21 +36,21 @@ class DataCleaner(threading.Thread):
     logger.info('Data cleanup thread started')
     self.config = config

-    self.file_max_age = config.get('agent','data_cleanup_max_age')
+    self.file_max_age = config.get('agent', 'data_cleanup_max_age', 86400)
     self.file_max_age = int(self.file_max_age) if self.file_max_age else None
     if self.file_max_age is None or self.file_max_age < 86400:       # keep for at least 24h
       logger.warn('The minimum value allowed for data_cleanup_max_age is 1 '
                   'day. Setting data_cleanup_max_age to 86400.')
       self.file_max_age = 86400

-    self.cleanup_interval = config.get('agent','data_cleanup_interval')
+    self.cleanup_interval = config.get('agent', 'data_cleanup_interval', 3600)
     self.cleanup_interval = int(self.cleanup_interval) if self.cleanup_interval else None
     if self.cleanup_interval is None or self.cleanup_interval < 3600:    # wait at least 1 hour between runs
       logger.warn('The minimum value allowed for data_cleanup_interval is 1 '
                   'hour. Setting data_cleanup_interval to 3600.')
       self.cleanup_interval = 3600

-    self.cleanup_max_size_MB = config.get('agent', 'data_cleanup_max_size_MB')
+    self.cleanup_max_size_MB = config.get('agent', 'data_cleanup_max_size_MB', 10000)
     self.cleanup_max_size_MB = int(self.cleanup_max_size_MB) if self.cleanup_max_size_MB else None
     if self.cleanup_max_size_MB is None or self.cleanup_max_size_MB > 10000:  # no more than 10 GBs
       logger.warn('The maximum value allowed for cleanup_max_size_MB is 10000 MB (10 GB). '
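
Editor's note: each hunk above now passes an explicit default to config.get, so a missing option no longer comes back as None before the clamping checks run. A sketch of the same default-then-clamp idea as one helper (the helper name is hypothetical; an AmbariConfig.get accepting a default argument is taken from the diff itself):

  def get_clamped_int(config, section, option, default, minimum=None, maximum=None):
    # Fall back to the default when the option is missing, then clamp
    # the parsed value into the allowed range.
    raw = config.get(section, option, default)
    value = int(raw) if raw else default
    if minimum is not None and value < minimum:
      value = minimum
    if maximum is not None and value > maximum:
      value = maximum
    return value

  # e.g. file_max_age = get_clamped_int(config, 'agent', 'data_cleanup_max_age',
  #                                     86400, minimum=86400)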

+ 2 - 7
ambari-agent/src/test/python/ambari_agent/TestActionQueue.py

@@ -595,15 +595,10 @@ class TestActionQueue(TestCase):
     
     
     report = actionQueue.result()
     self.assertEqual(len(report['reports']),1)
-    
-
       
-  @patch.object(StackVersionsFileHandler, "read_stack_version")
   @patch.object(CustomServiceOrchestrator, "resolve_script_path")
-  @patch.object(FileCache, "__init__")
-  def test_execute_python_executor(self, read_stack_version_mock, FileCache_mock, resolve_script_path_mock):
-    FileCache_mock.return_value = None
-    
+  @patch.object(StackVersionsFileHandler, "read_stack_version")
+  def test_execute_python_executor(self, read_stack_version_mock, resolve_script_path_mock):
     
     dummy_controller = MagicMock()
     cfg = AmbariConfig().getConfig()
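
Editor's note: the corrected decorator stack also restores the rule that stacked @patch decorators feed mock arguments bottom-up: the decorator nearest the function supplies the first mock parameter. A standalone illustration (toy class, not Ambari code):

  from mock import patch

  class Orchestrator(object):
    def resolve(self):
      return 'real resolve'
    def read_version(self):
      return 'real version'

  @patch.object(Orchestrator, 'read_version')   # outermost -> second argument
  @patch.object(Orchestrator, 'resolve')        # innermost -> first argument
  def check_order(resolve_mock, read_version_mock):
    resolve_mock.return_value = 'mocked resolve'
    read_version_mock.return_value = 'mocked version'
    orchestrator = Orchestrator()
    assert orchestrator.resolve() == 'mocked resolve'
    assert orchestrator.read_version() == 'mocked version'

  check_order()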

+ 64 - 12
ambari-agent/src/test/python/ambari_agent/TestController.py

@@ -30,7 +30,7 @@ from threading import Event
 import json

 with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
-  from ambari_agent import Controller, ActionQueue
+  from ambari_agent import Controller, ActionQueue, Register
   from ambari_agent import hostname
   from ambari_agent.Controller import AGENT_AUTO_RESTART_EXIT_CODE
   from ambari_commons import OSCheck
@@ -247,9 +247,9 @@ class TestController(unittest.TestCase):
     heartbeatWithServer.assert_called_once_with()

     self.controller.registerWithServer =\
-    Controller.Controller.registerWithServer
+      Controller.Controller.registerWithServer
     self.controller.heartbeatWithServer =\
-    Controller.Controller.registerWithServer
+      Controller.Controller.registerWithServer

   @patch("time.sleep")
   def test_registerAndHeartbeat(self, sleepMock):
@@ -300,6 +300,33 @@ class TestController(unittest.TestCase):
       Controller.Controller.registerWithServer


+  @patch("time.sleep")
+  @patch.object(Controller.Controller, "sendRequest")
+  def test_registerWithIOErrors(self, sendRequestMock, sleepMock):
+    # Check that server continues to heartbeat after connection errors
+    registerMock = MagicMock(name="Register")
+    registerMock.build.return_value = {}
+    actionQueue = MagicMock()
+    actionQueue.isIdle.return_value = True
+    self.controller.actionQueue = actionQueue
+    self.controller.register = registerMock
+    self.controller.responseId = 1
+    self.controller.TEST_IOERROR_COUNTER = 1
+    self.controller.isRegistered = False
+    def util_throw_IOErrors(*args, **kwargs):
+      """
+      Throws IOErrors 10 times and then stops heartbeats/registrations
+      """
+      if self.controller.TEST_IOERROR_COUNTER == 10:
+        self.controller.isRegistered = True
+      self.controller.TEST_IOERROR_COUNTER += 1
+      raise IOError("Sample error")
+    actionQueue.isIdle.return_value = False
+    sendRequestMock.side_effect = util_throw_IOErrors
+    self.controller.registerWithServer()
+    self.assertTrue(sendRequestMock.call_count > 5)
+
+
   @patch("os._exit")
   def test_restartAgent(self, os_exit_mock):

@@ -331,18 +358,22 @@ class TestController(unittest.TestCase):
       {'Content-Type': 'application/json'})

     conMock.request.return_value = '{invalid_object}'
-    actual = self.controller.sendRequest(url, data)
-    expected = {'exitstatus': 1, 'log': ('Response parsing failed! Request data: ' + data
-                                         + '; Response: {invalid_object}')}
-    self.assertEqual(actual, expected)
+
+    try:
+      self.controller.sendRequest(url, data)
+      self.fail("Should throw exception!")
+    except IOError, e: # Expected
+      self.assertEquals('Response parsing failed! Request data: ' + data +
+                        '; Response: {invalid_object}', e.message)

     exceptionMessage = "Connection Refused"
     conMock.request.side_effect = Exception(exceptionMessage)
-    actual = self.controller.sendRequest(url, data)
-    expected = {'exitstatus': 1, 'log': 'Request to ' + url + ' failed due to ' + exceptionMessage}
-
-    self.assertEqual(actual, expected)
-
+    try:
+      self.controller.sendRequest(url, data)
+      self.fail("Should throw exception!")
+    except IOError, e: # Expected
+      self.assertEquals('Request to ' + url + ' failed due to ' +
+                        exceptionMessage, e.message)


   @patch.object(threading._Event, "wait")
@@ -480,6 +511,27 @@ class TestController(unittest.TestCase):
     response["restartAgent"] = "false"
     self.controller.heartbeatWithServer()

+    sleepMock.assert_called_with(
+      self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
+
+    # Check that server continues to heartbeat after connection errors
+    self.controller.responseId = 1
+    self.controller.TEST_IOERROR_COUNTER = 1
+    sendRequest.reset()
+    def util_throw_IOErrors(*args, **kwargs):
+      """
+      Throws IOErrors 100 times and then stops heartbeats/registrations
+      """
+      if self.controller.TEST_IOERROR_COUNTER == 10:
+        self.controller.DEBUG_STOP_HEARTBEATING = True
+      self.controller.TEST_IOERROR_COUNTER += 1
+      raise IOError("Sample error")
+    self.controller.DEBUG_STOP_HEARTBEATING = False
+    actionQueue.isIdle.return_value = False
+    sendRequest.side_effect = util_throw_IOErrors
+    self.controller.heartbeatWithServer()
+    self.assertTrue(sendRequest.call_count > 5)
+
     sleepMock.assert_called_with(
       self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS)
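
Editor's note: the try/self.fail blocks above are one way to assert an expected exception; unittest's assertRaises is an equivalent, terser form when the exact message does not need checking. A sketch:

  import unittest

  class SendRequestErrors(unittest.TestCase):
    def test_connection_error_raises(self):
      def failing_request():
        raise IOError('Request to http://host failed due to Connection Refused')
      # Passes only if the callable raises IOError
      self.assertRaises(IOError, failing_request)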
 
 

+ 15 - 0
ambari-agent/src/test/python/ambari_agent/TestDataCleaner.py

@@ -22,6 +22,7 @@ limitations under the License.
 import unittest
 from mock.mock import patch, MagicMock, call, Mock
 from ambari_agent import DataCleaner
+import AmbariConfig


 class TestDataCleaner(unittest.TestCase):
@@ -41,6 +42,20 @@ class TestDataCleaner(unittest.TestCase):
     cleaner = DataCleaner.DataCleaner(config)
     self.assertFalse(DataCleaner.logger.warn.called)

+  def test_config(self):
+    """
+    Verify that if the config does not have a property, default values are used.
+    """
+    DataCleaner.logger.reset_mock()
+    config = AmbariConfig.AmbariConfig()
+    config.remove_option('agent', 'data_cleanup_max_age')
+    config.remove_option('agent', 'data_cleanup_interval')
+    config.remove_option('agent', 'data_cleanup_max_size_MB')
+    cleaner = DataCleaner.DataCleaner(config)
+
+    self.assertEqual(cleaner.file_max_age, 86400)
+    self.assertEqual(cleaner.cleanup_interval, 3600)
+    self.assertEqual(cleaner.cleanup_max_size_MB, 10000)

   def test_init_warn(self):
     config = MagicMock()

+ 4 - 11
ambari-common/src/main/python/ambari_commons/firewall.py

@@ -101,19 +101,12 @@ class UbuntuFirewallChecks(FirewallChecks):
         result = True
     return result

-  def get_running_result(self):
-    # To support test code.  Expected ouput from run_os_command.
-    return (0, "ufw start/running", "")
-
-  def get_stopped_result(self):
-    # To support test code.  Expected output from run_os_command.
-    return (0, "ufw stop/waiting", "")
-
-
 class Fedora18FirewallChecks(FirewallChecks):
+  def __init__(self):
+    super(Fedora18FirewallChecks, self).__init__()

   def get_command(self):
-    return "systemctl is-active iptables"
+    return "systemctl is-active %s" % (self.FIREWALL_SERVICE_NAME)

   def check_result(self, retcode, out, err):
     result = False
@@ -124,8 +117,8 @@ class Fedora18FirewallChecks(FirewallChecks):

 class SuseFirewallChecks(FirewallChecks):
   def __init__(self):
+    super(SuseFirewallChecks, self).__init__()
     self.FIREWALL_SERVICE_NAME = "SuSEfirewall2"
-    self.SERVICE_SUBCMD = "status"

   def get_command(self):
     return "%s %s" % (self.FIREWALL_SERVICE_NAME, self.SERVICE_SUBCMD)

+ 6 - 10
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java

@@ -62,16 +62,12 @@ import org.apache.ambari.server.orm.PersistenceType;
 import org.apache.ambari.server.orm.dao.BlueprintDAO;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.GroupDAO;
-import org.apache.ambari.server.orm.dao.MemberDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.ResourceDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.dao.ViewDAO;
-import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.resources.ResourceManager;
@@ -79,7 +75,6 @@ import org.apache.ambari.server.resources.api.rest.GetResource;
 import org.apache.ambari.server.scheduler.ExecutionScheduleManager;
 import org.apache.ambari.server.security.CertificateManager;
 import org.apache.ambari.server.security.SecurityFilter;
-import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.authorization.AmbariAuthorizationFilter;
 import org.apache.ambari.server.security.authorization.AmbariLdapAuthenticationProvider;
 import org.apache.ambari.server.security.authorization.AmbariLdapDataPopulator;
@@ -148,6 +143,11 @@ public class AmbariServer {
   @Inject
   @Named("dbInitNeeded")
   boolean dbInitNeeded;
+  /**
+   * The singleton view registry.
+   */
+  @Inject
+  ViewRegistry viewRegistry;

   public String getServerOsType() {
     return configs.getServerOsType();
@@ -303,7 +303,6 @@ public class AmbariServer {
       FailsafeHandlerList handlerList = new FailsafeHandlerList();

       try {
-        ViewRegistry viewRegistry = ViewRegistry.getInstance();
         for (ViewInstanceEntity entity : viewRegistry.readViewArchives(configs)){
           handlerList.addFailsafeHandler(viewRegistry.getWebAppContext(entity));
         }
@@ -543,10 +542,6 @@ public class AmbariServer {
         injector.getInstance(PermissionDAO.class), injector.getInstance(ResourceDAO.class));
     ClusterPrivilegeResourceProvider.init(injector.getInstance(ClusterDAO.class));
     AmbariPrivilegeResourceProvider.init(injector.getInstance(ClusterDAO.class));
-    ViewRegistry.init(injector.getInstance(ViewDAO.class), injector.getInstance(ViewInstanceDAO.class),
-        injector.getInstance(UserDAO.class), injector.getInstance(MemberDAO.class),
-        injector.getInstance(PrivilegeDAO.class), injector.getInstance(SecurityHelper.class),
-        injector.getInstance(ResourceDAO.class), injector.getInstance(ResourceTypeDAO.class));
   }

   /**
@@ -588,6 +583,7 @@ public class AmbariServer {
       server = injector.getInstance(AmbariServer.class);
       CertificateManager certMan = injector.getInstance(CertificateManager.class);
       certMan.initRootCert();
+      ViewRegistry.initInstance(server.viewRegistry);
       ComponentSSLConfiguration.instance().init(server.configs);
       server.run();
     } catch (Throwable t) {
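
Editor's note: this change stops assembling the ViewRegistry through a static init(...) taking eight DAOs and instead lets the injector build it, handing the finished instance to static lookup via ViewRegistry.initInstance. A Python sketch of that handoff (the diff's code is Java with Guice; only the shape is shown here, not the API):

  class ViewRegistry(object):
    _instance = None

    @classmethod
    def init_instance(cls, instance):
      # Called once at startup with the fully wired registry.
      cls._instance = instance

    @classmethod
    def get_instance(cls):
      return cls._instance

  class Server(object):
    def __init__(self, view_registry):
      # Dependencies arrive pre-built instead of being assembled here.
      self.view_registry = view_registry

  server = Server(ViewRegistry())
  ViewRegistry.init_instance(server.view_registry)
  assert ViewRegistry.get_instance() is server.view_registry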

+ 30 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceConfigVersionResponse.java

@@ -30,9 +30,12 @@ public class ServiceConfigVersionResponse {
   private String serviceName;
   private Long version;
   private Long createTime;
+  private Long groupId;
+  private String groupName;
   private String userName;
   private String note;
   private List<ConfigurationResponse> configurations;
+  private List<String> hosts;

   @JsonProperty("service_name")
   public String getServiceName() {
@@ -100,5 +103,32 @@ public class ServiceConfigVersionResponse {
   public void setNote(String note) {
     this.note = note;
   }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  @JsonProperty("hosts")
+  public void setHosts(List<String> hosts) {
+    this.hosts = hosts;
+  }
+
+  @JsonProperty("group_name")
+  public String getGroupName() {
+    return groupName;
+  }
+
+  public void setGroupName(String groupName) {
+    this.groupName = groupName;
+  }
+
+  @JsonProperty("group_id")
+  public Long getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(Long groupId) {
+    this.groupId = groupId;
+  }
 }
 
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java

@@ -552,7 +552,7 @@ public abstract class GangliaPropertyProvider extends AbstractPropertyProvider {
         }
       } catch (IOException e) {
         if (LOG.isErrorEnabled()) {
-          LOG.error("Caught exception getting Ganglia metrics : spec=" + spec, e);
+          LOG.error("Caught exception getting Ganglia metrics : spec=" + spec);
         }
       } finally {
         if (reader != null) {

+ 15 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java

@@ -456,6 +456,11 @@ public class ConfigGroupResourceProvider extends
         request.getTag(), request.getDescription(),
         request.getConfigs(), hosts);

+      String serviceName = null;
+      if (configGroup.getConfigurations() != null) {
+        serviceName = cluster.getServiceForConfigTypes(configGroup.getConfigurations().keySet());
+      }
+
       // Persist before add, since id is auto-generated
       configLogger.info("Persisting new Config group"
         + ", clusterName = " + cluster.getClusterName()
@@ -465,6 +470,9 @@ public class ConfigGroupResourceProvider extends

       configGroup.persist();
       cluster.addConfigGroup(configGroup);
+      if (serviceName != null) {
+        cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(), null, configGroup);
+      }

       ConfigGroupResponse response = new ConfigGroupResponse(configGroup
         .getId(), configGroup.getClusterName(), configGroup.getName(),
@@ -509,6 +517,10 @@ public class ConfigGroupResourceProvider extends
                                  + ", clusterName = " + request.getClusterName()
                                  + ", groupId = " + request.getId());
       }
+      String serviceName = null;
+      if (configGroup.getConfigurations() != null) {
+        serviceName = cluster.getServiceForConfigTypes(configGroup.getConfigurations().keySet());
+      }

       // Update hosts
       Map<String, Host> hosts = new HashMap<String, Host>();
@@ -541,6 +553,9 @@ public class ConfigGroupResourceProvider extends
         + ", user = " + getManagementController().getAuthName());

       configGroup.persist();
+      if (serviceName != null) {
+        cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(), null, configGroup);
+      }
     }

     getManagementController().getConfigHelper().invalidateStaleConfigsCache();
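
Editor's note: both the create and update paths above now derive the owning service from the group's config types and record a new service config version after persisting. A Python-flavored sketch of that flow (the original is Java; the names mirror the diff but the bodies are illustrative):

  def update_config_group(cluster, config_group, user):
    # Resolve which service the group's config types belong to.
    service_name = None
    if config_group.get_configurations():
      service_name = cluster.get_service_for_config_types(
          config_group.get_configurations().keys())

    config_group.persist()

    # A config-group change is itself a new, group-scoped config version.
    if service_name is not None:
      cluster.create_service_config_version(service_name, user,
                                            note=None, config_group=config_group)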

+ 10 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceConfigVersionResourceProvider.java

@@ -38,6 +38,9 @@ public class ServiceConfigVersionResourceProvider extends
   public static final String SERVICE_CONFIG_VERSION_CREATE_TIME_PROPERTY_ID = PropertyHelper.getPropertyId(null, "createtime");
   public static final String SERVICE_CONFIG_VERSION_USER_PROPERTY_ID = PropertyHelper.getPropertyId(null, "user");
   public static final String SERVICE_CONFIG_VERSION_NOTE_PROPERTY_ID = PropertyHelper.getPropertyId(null, "service_config_version_note");
+  public static final String SERVICE_CONFIG_VERSION_GROUP_ID_PROPERTY_ID = PropertyHelper.getPropertyId(null, "group_id");
+  public static final String SERVICE_CONFIG_VERSION_GROUP_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(null, "group_name");
+  public static final String SERVICE_CONFIG_VERSION_HOSTNAMES_PROPERTY_ID = PropertyHelper.getPropertyId(null, "hosts");
   public static final String SERVICE_CONFIG_VERSION_CONFIGURATIONS_PROPERTY_ID = PropertyHelper.getPropertyId(null, "configurations");

   /**
@@ -101,6 +104,9 @@ public class ServiceConfigVersionResourceProvider extends
       resource.setProperty(SERVICE_CONFIG_VERSION_CONFIGURATIONS_PROPERTY_ID,
           convertToSubResources(response.getClusterName(), response.getConfigurations()));
       resource.setProperty(SERVICE_CONFIG_VERSION_NOTE_PROPERTY_ID, response.getNote());
+      resource.setProperty(SERVICE_CONFIG_VERSION_GROUP_ID_PROPERTY_ID, response.getGroupId());
+      resource.setProperty(SERVICE_CONFIG_VERSION_GROUP_NAME_PROPERTY_ID, response.getGroupName());
+      resource.setProperty(SERVICE_CONFIG_VERSION_HOSTNAMES_PROPERTY_ID, response.getHosts());

       resources.add(resource);
     }
@@ -131,7 +137,10 @@ public class ServiceConfigVersionResourceProvider extends
       if (!propertyId.equals("cluster_name") && !propertyId.equals("serviceconfigversion") &&
           !propertyId.equals("service_name") && !propertyId.equals("createtime") &&
           !propertyId.equals("appliedtime") && !propertyId.equals("user") &&
-          !propertyId.equals("service_config_version_note")) {
+          !propertyId.equals("service_config_version_note") &&
+          !propertyId.equals("group_id") &&
+          !propertyId.equals("group_name") &&
+          !propertyId.equals("hosts")) {

         unsupportedProperties.add(propertyId);


+ 1 - 14
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewInstanceResourceProvider.java

@@ -300,7 +300,6 @@ public class ViewInstanceResourceProvider extends AbstractResourceProvider {
     }

     Collection<ViewInstancePropertyEntity> instanceProperties = new HashSet<ViewInstancePropertyEntity>();
-    Collection<ViewInstanceDataEntity>     instanceData       = new HashSet<ViewInstanceDataEntity>();

     for (Map.Entry<String, Object> entry : properties.entrySet()) {

@@ -317,24 +316,12 @@ public class ViewInstanceResourceProvider extends AbstractResourceProvider {

         instanceProperties.add(viewInstancePropertyEntity);
       } else if (propertyName.startsWith(DATA_PREFIX)) {
-        ViewInstanceDataEntity viewInstanceDataEntity = new ViewInstanceDataEntity();
-
-        viewInstanceDataEntity.setViewName(viewName);
-        viewInstanceDataEntity.setViewInstanceName(name);
-        viewInstanceDataEntity.setName(entry.getKey().substring(DATA_PREFIX.length()));
-        viewInstanceDataEntity.setUser(viewInstanceEntity.getCurrentUserName());
-        viewInstanceDataEntity.setValue((String) entry.getValue());
-        viewInstanceDataEntity.setViewInstanceEntity(viewInstanceEntity);
-
-        instanceData.add(viewInstanceDataEntity);
+        viewInstanceEntity.putInstanceData(entry.getKey().substring(DATA_PREFIX.length()), (String) entry.getValue());
       }
     }
     if (!instanceProperties.isEmpty()) {
       viewInstanceEntity.setProperties(instanceProperties);
     }
-    if (!instanceData.isEmpty()) {
-      viewInstanceEntity.setData(instanceData);
-    }

     return viewInstanceEntity;
   }

+ 0 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java

@@ -551,7 +551,6 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
   private static String logException(Throwable throwable) {
     String msg = "Caught exception getting JMX metrics : " + throwable.getLocalizedMessage();

-    LOG.error(msg);
     LOG.debug(msg, throwable);

     return msg;

+ 14 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java

@@ -96,6 +96,20 @@ public class ClusterDAO {
     return daoUtils.selectOne(query);
   }

+  @RequiresSession
+  public ClusterConfigEntity findConfig(Long clusterId, String type, Long version) {
+    CriteriaBuilder cb = entityManagerProvider.get().getCriteriaBuilder();
+    CriteriaQuery<ClusterConfigEntity> cq = cb.createQuery(ClusterConfigEntity.class);
+    Root<ClusterConfigEntity> config = cq.from(ClusterConfigEntity.class);
+    cq.where(cb.and(
+        cb.equal(config.get("clusterId"), clusterId)),
+      cb.equal(config.get("type"), type),
+      cb.equal(config.get("version"), version)
+    );
+    TypedQuery<ClusterConfigEntity> query = entityManagerProvider.get().createQuery(cq);
+    return daoUtils.selectOne(query);
+  }
+
   /**
    * Create Cluster entity in Database
    * @param clusterEntity entity to create

+ 18 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ViewDAO.java

@@ -50,6 +50,24 @@ public class ViewDAO {
     return entityManagerProvider.get().find(ViewEntity.class, viewName);
   }

+  /**
+   * Find a view with a given common name.
+   *
+   * @param viewCommonName common name of view to find
+   *
+   * @return  a matching view or null
+   */
+  public ViewEntity findByCommonName(String viewCommonName) {
+    if (viewCommonName != null) {
+      for (ViewEntity viewEntity : findAll()) {
+        if (viewCommonName.equals(viewEntity.getCommonName())) {
+          return viewEntity;
+        }
+      }
+    }
+    return null;
+  }
+
   /**
    * Find all views.
    *
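
Editor's note: findByCommonName is a guarded linear scan over findAll() that returns the first match or null, which is reasonable while the number of views stays small; a dedicated query would avoid loading every row at the cost of another named query. The same shape in Python:

  def find_by_common_name(views, common_name):
    # Guard against a missing name, then scan for the first match.
    if common_name is None:
      return None
    for view in views:
      if view.common_name == common_name:
        return view
    return None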

+ 28 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java

@@ -20,7 +20,9 @@ package org.apache.ambari.server.orm.entities;
 
 
 import javax.persistence.Basic;
 import javax.persistence.CascadeType;
+import javax.persistence.CollectionTable;
 import javax.persistence.Column;
+import javax.persistence.ElementCollection;
 import javax.persistence.Entity;
 import javax.persistence.GeneratedValue;
 import javax.persistence.GenerationType;
@@ -32,6 +34,7 @@ import javax.persistence.ManyToOne;
 import javax.persistence.OneToMany;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
+import java.util.Collection;
 import java.util.List;

 @Entity
@@ -56,6 +59,10 @@ public class ServiceConfigEntity {
   @Column(name = "service_name", nullable = false)
   private String serviceName;

+  @Basic
+  @Column(name = "group_id", nullable = true)
+  private Long groupId;
+
   @Basic
   @Column(name = "version", nullable = false)
   private Long version;
@@ -72,6 +79,11 @@ public class ServiceConfigEntity {
   @Column(name = "note")
   private String note;

+  @ElementCollection()
+  @CollectionTable(name = "serviceconfighosts", joinColumns = {@JoinColumn(name = "service_config_id")})
+  @Column(name = "hostname")
+  private List<String> hostNames;
+
   @ManyToMany
   @JoinTable(
     name = "serviceconfigmapping",
@@ -155,4 +167,20 @@ public class ServiceConfigEntity {
   public void setNote(String note) {
     this.note = note;
   }
+
+  public Long getGroupId() {
+    return groupId;
+  }
+
+  public void setGroupId(Long groupId) {
+    this.groupId = groupId;
+  }
+
+  public List<String> getHostNames() {
+    return hostNames;
+  }
+
+  public void setHostNames(List<String> hostNames) {
+    this.hostNames = hostNames;
+  }
 }

+ 12 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java

@@ -226,13 +226,24 @@ public class ViewInstanceEntity implements ViewInstanceDefinition {
    * @param name the instance name
    */
   public ViewInstanceEntity(ViewEntity view, String name) {
+    this(view, name, view.getLabel());
+  }
+
+  /**
+   * Construct a view instance definition.
+   *
+   * @param view the parent view definition
+   * @param name the instance name
+   * @param label the instance label
+   */
+  public ViewInstanceEntity(ViewEntity view, String name, String label) {
     this.name = name;
     this.instanceConfig = null;
     this.view = view;
     this.viewName = view.getName();
     this.description = null;
     this.visible = 'Y';
-    this.label = view.getLabel();
+    this.label = label;
   }


+ 5 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java

@@ -174,6 +174,11 @@ public interface Cluster {
    */
   ServiceConfigVersionResponse addDesiredConfig(String user, Config config, String serviceConfigVersionNote);

+  ServiceConfigVersionResponse createServiceConfigVersion(String serviceName, String user, String note,
+                                                          ConfigGroup configGroup);
+
+  String getServiceForConfigTypes(Collection<String> configTypes);
+
   /**
    * Apply specified service config version (rollback)
    * @param serviceName service name

+ 130 - 29
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -85,7 +85,6 @@ import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.persist.Transactional;
-import org.springframework.security.core.GrantedAuthority;

 public class ClusterImpl implements Cluster {

@@ -1412,6 +1411,82 @@ public class ClusterImpl implements Cluster {
     }
   }

+
+  @Override
+  public ServiceConfigVersionResponse createServiceConfigVersion(String serviceName, String user, String note,
+                                                                 ConfigGroup configGroup) {
+
+    //create next service config version
+    ServiceConfigEntity serviceConfigEntity = new ServiceConfigEntity();
+    serviceConfigEntity.setServiceName(serviceName);
+    serviceConfigEntity.setClusterEntity(clusterEntity);
+    serviceConfigEntity.setVersion(configVersionHelper.getNextVersion(serviceName));
+    serviceConfigEntity.setUser(user);
+    serviceConfigEntity.setNote(note);
+
+    if (configGroup != null) {
+      Collection<Config> configs = configGroup.getConfigurations().values();
+      List<ClusterConfigEntity> configEntities = new ArrayList<ClusterConfigEntity>(configs.size());
+      for (Config config : configs) {
+        configEntities.add(clusterDAO.findConfig(getClusterId(), config.getType(), config.getVersion()));
+      }
+      serviceConfigEntity.setClusterConfigEntities(configEntities);
+
+      serviceConfigEntity.setHostNames(new ArrayList<String>(configGroup.getHosts().keySet()));
+
+    } else {
+      List<ClusterConfigEntity> configEntities = getClusterConfigEntitiesByService(serviceName);
+      serviceConfigEntity.setClusterConfigEntities(configEntities);
+    }
+
+    serviceConfigDAO.create(serviceConfigEntity);
+
+    ServiceConfigVersionResponse response = new ServiceConfigVersionResponse();
+    response.setUserName(user);
+    response.setClusterName(getClusterName());
+    response.setVersion(serviceConfigEntity.getVersion());
+    response.setServiceName(serviceConfigEntity.getServiceName());
+    response.setCreateTime(serviceConfigEntity.getCreateTimestamp());
+    response.setUserName(serviceConfigEntity.getUser());
+    response.setNote(serviceConfigEntity.getNote());
+    response.setGroupId(serviceConfigEntity.getGroupId());
+    response.setHosts(serviceConfigEntity.getHostNames());
+    response.setGroupName(configGroup != null ? configGroup.getName() : null);
+
+    return response;
+  }
+
+  @Override
+  public String getServiceForConfigTypes(Collection<String> configTypes) {
+    String serviceName = null;
+    for (String configType : configTypes) {
+      for (Entry<String, String> entry : serviceConfigTypes.entries()) {
+        if (StringUtils.equals(entry.getValue(), configType)) {
+          if (serviceName != null) {
+            if (entry.getKey()!=null && !StringUtils.equals(serviceName, entry.getKey())) {
+              throw new IllegalArgumentException("Config type {} belongs to {} service, " +
+                "but config group qualified for {}");
+            }
+          } else {
+            serviceName = entry.getKey();
+          }
+        }
+      }
+    }
+    return serviceName;
+  }
+
+  public String getServiceByConfigType(String configType) {
+    for (Entry<String, String> entry : serviceConfigTypes.entries()) {
+      String serviceName = entry.getKey();
+      String type = entry.getValue();
+      if (StringUtils.equals(type, configType)) {
+        return serviceName;
+      }
+    }
+    return null;
+  }
+
   @Override
   public boolean setServiceConfigVersion(String serviceName, Long version, String user, String note) throws AmbariException {
     if (null == user)
@@ -1470,6 +1545,7 @@ public class ClusterImpl implements Cluster {
           serviceConfigVersionResponse.setCreateTime(serviceConfigEntity.getCreateTimestamp());
           serviceConfigVersionResponse.setUserName(serviceConfigEntity.getUser());
           serviceConfigVersionResponse.setNote(serviceConfigEntity.getNote());
+          serviceConfigVersionResponse.setHosts(serviceConfigEntity.getHostNames());
           serviceConfigVersionResponse.setConfigurations(new ArrayList<ConfigurationResponse>());

           List<ClusterConfigEntity> clusterConfigEntities = serviceConfigEntity.getClusterConfigEntities();
@@ -1480,6 +1556,17 @@ public class ClusterImpl implements Cluster {
               config.getPropertiesAttributes()));
           }

+          Long groupId = serviceConfigEntity.getGroupId();
+          if (groupId != null) {
+            serviceConfigVersionResponse.setGroupId(groupId);
+            ConfigGroup configGroup = clusterConfigGroups.get(groupId);
+            if (configGroup != null) {
+              serviceConfigVersionResponse.setGroupName(configGroup.getName());
+            } else {
+              //TODO null or special name?
+            }
+          }
+
           serviceConfigVersionResponses.add(serviceConfigVersionResponse);
         }

@@ -1534,16 +1621,43 @@ public class ClusterImpl implements Cluster {
     }

     //disable all configs related to service
-    Collection<String> configTypes = serviceConfigTypes.get(serviceName);
-    for (ClusterConfigMappingEntity entity : clusterEntity.getConfigMappingEntities()) {
-      if (configTypes.contains(entity.getType()) && entity.isSelected() > 0) {
-        entity.setSelected(0);
+    if (serviceConfigEntity.getGroupId() == null) {
+      Collection<String> configTypes = serviceConfigTypes.get(serviceName);
+      for (ClusterConfigMappingEntity entity : clusterEntity.getConfigMappingEntities()) {
+        if (configTypes.contains(entity.getType()) && entity.isSelected() > 0) {
+          entity.setSelected(0);
+        }
       }
-    }
-    clusterDAO.merge(clusterEntity);
+      clusterDAO.merge(clusterEntity);

-    for (ClusterConfigEntity configEntity : serviceConfigEntity.getClusterConfigEntities()) {
-      selectConfig(configEntity.getType(), configEntity.getTag(), user);
+      for (ClusterConfigEntity configEntity : serviceConfigEntity.getClusterConfigEntities()) {
+        selectConfig(configEntity.getType(), configEntity.getTag(), user);
+      }
+    } else {
+      Long configGroupId = serviceConfigEntity.getGroupId();
+      ConfigGroup configGroup = clusterConfigGroups.get(configGroupId);
+      if (configGroup != null) {
+        Map<String, Config> groupDesiredConfigs = new HashMap<String, Config>();
+        for (ClusterConfigEntity entity : serviceConfigEntity.getClusterConfigEntities()) {
+          Config config = allConfigs.get(entity.getType()).get(entity.getTag());
+          groupDesiredConfigs.put(config.getType(), config);
+        }
+        configGroup.setConfigurations(groupDesiredConfigs);
+
+        Map<String, Host> groupDesiredHosts = new HashMap<String, Host>();
+        for (String hostname : serviceConfigEntity.getHostNames()) {
+          Host host = clusters.getHost(hostname);
+          if (host != null) {
+            groupDesiredHosts.put(hostname, host);
+          } else {
+            LOG.warn("Host {} doesn't exist anymore, skipping", hostname);
+          }
+        }
+        configGroup.setHosts(groupDesiredHosts);
+        configGroup.persist();
+      } else {
+        throw new IllegalArgumentException("Config group {} doesn't exist");
+      }
     }

     ServiceConfigEntity serviceConfigEntityClone = new ServiceConfigEntity();
@@ -1553,6 +1667,8 @@ public class ClusterImpl implements Cluster {
     serviceConfigEntityClone.setClusterEntity(clusterEntity);
     serviceConfigEntityClone.setClusterConfigEntities(serviceConfigEntity.getClusterConfigEntities());
     serviceConfigEntityClone.setClusterId(serviceConfigEntity.getClusterId());
+    serviceConfigEntityClone.setHostNames(serviceConfigEntity.getHostNames());
+    serviceConfigEntityClone.setGroupId(serviceConfigEntity.getGroupId());
     serviceConfigEntityClone.setNote(serviceConfigVersionNote);
     serviceConfigEntityClone.setVersion(configVersionHelper.getNextVersion(serviceName));

@@ -1610,15 +1726,11 @@ public class ClusterImpl implements Cluster {
   private ServiceConfigVersionResponse createServiceConfigVersion(String serviceName, String user,
                                                                   String serviceConfigVersionNote) {
     //create next service config version
-    ServiceConfigEntity serviceConfigEntity = new ServiceConfigEntity();
-    serviceConfigEntity.setServiceName(serviceName);
-    serviceConfigEntity.setClusterEntity(clusterEntity);
-    serviceConfigEntity.setVersion(configVersionHelper.getNextVersion(serviceName));
-    serviceConfigEntity.setUser(user);
-    serviceConfigEntity.setNote(serviceConfigVersionNote);
+    return createServiceConfigVersion(serviceName, user, serviceConfigVersionNote, null);
+  }

+  private List<ClusterConfigEntity> getClusterConfigEntitiesByService(String serviceName) {
     List<ClusterConfigEntity> configEntities = new ArrayList<ClusterConfigEntity>();
-    serviceConfigEntity.setClusterConfigEntities(configEntities);

     //add configs from this service
     Collection<String> configTypes = serviceConfigTypes.get(serviceName);
@@ -1634,18 +1746,7 @@ public class ClusterImpl implements Cluster {
         }
       }
     }
-
-    serviceConfigDAO.create(serviceConfigEntity);
-
-    ServiceConfigVersionResponse response = new ServiceConfigVersionResponse();
-    response.setUserName(user);
-    response.setClusterName(getClusterName());
-    response.setVersion(serviceConfigEntity.getVersion());
-    response.setServiceName(serviceConfigEntity.getServiceName());
-    response.setCreateTime(serviceConfigEntity.getCreateTimestamp());
-    response.setUserName(serviceConfigEntity.getUser());
-    response.setNote(serviceConfigEntity.getNote());
-    return response;
+    return configEntities;
   }

   @Override
@@ -1753,7 +1854,7 @@ public class ClusterImpl implements Cluster {
           ServiceComponentHost serviceComponentHost = serviceComponent.getServiceComponentHost(event.getHostName());
           serviceComponentHost.handleEvent(event);
         } catch (AmbariException e) {
-          LOG.error("ServiceComponentHost lookup exception ", e);
+          LOG.error("ServiceComponentHost lookup exception ", e.getMessage());
           failedEvents.add(event);
         } catch (InvalidStateTransitionException e) {
           LOG.error("Invalid transition ", e);

+ 23 - 27
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java

@@ -18,16 +18,27 @@
 
 
 package org.apache.ambari.server.state.cluster;

-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.persistence.RollbackException;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.agent.DiskInfo;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
@@ -55,20 +66,11 @@ import org.apache.ambari.server.state.host.HostFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.security.core.GrantedAuthority;
-import org.springframework.security.core.authority.SimpleGrantedAuthority;

-import javax.persistence.RollbackException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;

 @Singleton
 public class ClustersImpl implements Clusters {
@@ -101,6 +103,8 @@ public class ClustersImpl implements Clusters {
   @Inject
   HostFactory hostFactory;
   @Inject
+  Configuration configuration;
+  @Inject
   AmbariMetaInfo ambariMetaInfo;
   @Inject
   Gson gson;
@@ -707,7 +711,8 @@ public class ClustersImpl implements Clusters {
       // do nothing
     }

-    return (cluster == null && readOnly) || checkPermission(cluster, readOnly);
+    return (cluster == null && readOnly) || !configuration.getApiAuthentication()
+      || checkPermission(cluster, readOnly);
   }

   /**
@@ -737,15 +742,6 @@ public class ClustersImpl implements Clusters {
           }
         }
       }
-
-      // SimpleGrantedAuthority is required by InternalAuthenticationToken for internal authorization by token
-      if (grantedAuthority instanceof SimpleGrantedAuthority){
-        SimpleGrantedAuthority authority = (SimpleGrantedAuthority) grantedAuthority;
-        if ("AMBARI.ADMIN".equals(authority.getAuthority())) {
-          return true;
-        }
-
-      }
     }
     // TODO : should we log this?
     return false;
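
Editor's note: the permission check above now short-circuits when API authentication is disabled, and the order of the disjunction matters: the cheap configuration test runs before the per-privilege scan. A one-function Python sketch of the same predicate (parameter names are illustrative):

  def has_access(cluster, read_only, api_authentication_enabled, check_permission):
    # Anonymous read of a missing cluster, auth disabled, or a real
    # permission hit -- evaluated cheapest-first.
    return ((cluster is None and read_only)
            or not api_authentication_enabled
            or check_permission(cluster, read_only))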

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java

@@ -741,7 +741,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
           saveIfPersisted();
           // TODO Audit logs
         } catch (InvalidStateTransitionException e) {
-          LOG.error("Can't handle ServiceComponentHostEvent event at"
+          LOG.debug("Can't handle ServiceComponentHostEvent event at"
               + " current state"
               + ", serviceComponentName=" + this.getServiceComponentName()
               + ", hostName=" + this.getHostName()

+ 3 - 5
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java

@@ -453,10 +453,7 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
 
 
     //add new sequences for config groups
     //TODO evalate possibility to automatically wrap object names in DBAcessor
-    String valueColumnName = "\"value\"";
-    if (Configuration.ORACLE_DB_NAME.equals(dbType) || Configuration.MYSQL_DB_NAME.equals(dbType)) {
-      valueColumnName = "value";
-    }
+    String valueColumnName = "sequence_value";

     dbAccessor.executeQuery("INSERT INTO ambari_sequences(sequence_name, " + valueColumnName + ") " +
       "VALUES('configgroup_id_seq', 1)", true);
@@ -713,6 +710,7 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
                   configEntity.setType(configType);
                   configEntity.setTag(defaultVersionTag);
                   configEntity.setData(configData);
+                  configEntity.setVersion(1L);
                   configEntity.setTimestamp(System.currentTimeMillis());
                   configEntity.setClusterEntity(clusterEntity);
                   LOG.debug("Creating new " + configType + " config...");
@@ -809,7 +807,7 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
   }

   private String getPostgresSequenceUpgradeQuery() {
-    return "INSERT INTO ambari_sequences(sequence_name, \"value\") " +
+    return "INSERT INTO ambari_sequences(sequence_name, sequence_value) " +
       "SELECT 'cluster_id_seq', nextval('clusters_cluster_id_seq') " +
       "UNION ALL " +
       "SELECT 'user_id_seq', nextval('users_user_id_seq') " +

+ 2 - 10
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java

@@ -24,7 +24,6 @@ import java.util.Collections;
 import java.util.List;

 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;

 import com.google.inject.Inject;
@@ -35,7 +34,7 @@ import com.google.inject.Injector;
  */
 public class UpgradeCatalog160 extends AbstractUpgradeCatalog {

-  //SourceVersion is only for book-keeping purpos  
+  //SourceVersion is only for book-keeping purpos
   @Override
   public String getSourceVersion() {
     return "1.5.1";
@@ -99,15 +98,8 @@ public class UpgradeCatalog160 extends AbstractUpgradeCatalog {
 
 
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
-    String dbType = getDbType();
-
     //add new sequences for view entity
-    String valueColumnName = "\"value\"";
-    if (Configuration.ORACLE_DB_NAME.equals(dbType) || Configuration.MYSQL_DB_NAME.equals(dbType)) {
-      valueColumnName = "value";
-    }
-
-    dbAccessor.executeQuery("INSERT INTO ambari_sequences(sequence_name, " + valueColumnName + ") " +
+    dbAccessor.executeQuery("INSERT INTO ambari_sequences(sequence_name, sequence_value) " +
         "VALUES('viewentity_id_seq', 0)", true);

     // Add missing property for YARN

+ 13 - 23
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java

@@ -23,10 +23,7 @@ import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-
 import javax.persistence.EntityManager;
 import javax.persistence.TypedQuery;
 
@@ -58,7 +55,7 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog {
    */
   private static final Logger LOG = LoggerFactory.getLogger
       (UpgradeCatalog161.class);
-  
+
   // ----- Constructors ------------------------------------------------------
 
   @Inject
@@ -249,10 +246,10 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog {
     // Add constraints
     dbAccessor.addFKConstraint("requestoperationlevel", "FK_req_op_level_req_id",
             "request_id", "request", "request_id", true);
-    
+
     // Clusters
-    dbAccessor.addColumn("clusters", new DBColumnInfo("provisioning_state", String.class, 255, State.INIT.name(), false));    
-    
+    dbAccessor.addColumn("clusters", new DBColumnInfo("provisioning_state", String.class, 255, State.INIT.name(), false));
+
     dbAccessor.dropConstraint("stage", "FK_stage_cluster_id", true);
     dbAccessor.dropConstraint("request", "FK_request_cluster_id", true);
   }
@@ -262,40 +259,33 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog {
 
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
-    String dbType = getDbType();
-
-    String valueColumnName = "\"value\"";
-    if (Configuration.ORACLE_DB_NAME.equals(dbType) || Configuration.MYSQL_DB_NAME.equals(dbType)) {
-      valueColumnName = "value";
-    }
-    
     //add new sequences for operation level
-    dbAccessor.executeQuery("INSERT INTO ambari_sequences(sequence_name, " + valueColumnName + ") " +
+    dbAccessor.executeQuery("INSERT INTO ambari_sequences(sequence_name, sequence_value) " +
             "VALUES('operation_level_id_seq', 1)", true);
-    
+
     // upgrade cluster provision state
-    executeInTransaction(new Runnable() { 
+    executeInTransaction(new Runnable() {
       @Override
       public void run() {
-        // it should be safe to bulk update the current cluster state since 
+        // it should be safe to bulk update the current cluster state since
         // this field is not currently used and since all clusters stored in
         // the database must (at this point) be installed
-        final EntityManager em = getEntityManagerProvider().get();        
+        final EntityManager em = getEntityManagerProvider().get();
         final TypedQuery<ClusterEntity> query = em.createQuery(
-            "UPDATE ClusterEntity SET provisioningState = :provisioningState", 
+            "UPDATE ClusterEntity SET provisioningState = :provisioningState",
             ClusterEntity.class);
 
         query.setParameter("provisioningState", State.INSTALLED);
         final int updatedClusterProvisionedStateCount = query.executeUpdate();
-        
+
         LOG.info("Updated {} cluster provisioning states to {}",
             updatedClusterProvisionedStateCount, State.INSTALLED);
       }
     });
-    
+
     addMissingConfigs();
   }
-  
+
   protected void addMissingConfigs() throws AmbariException {
     updateConfigurationProperties("hbase-site", Collections.singletonMap("hbase.regionserver.info.port", "60030"), false, false);
     updateConfigurationProperties("hbase-site", Collections.singletonMap("hbase.master.info.port", "60010"), false, false);

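A side note on the bulk update inside executeInTransaction above: the JPA spec defines EntityManager.createQuery(String, Class) for queries that return results, so a JPQL bulk UPDATE is conventionally built untyped; the typed form evidently works with the persistence provider used here, but the untyped variant is the portable one. A minimal sketch under that assumption (entity and field names taken from the hunk above):

    // Untyped JPQL bulk update; executeUpdate() returns the modified row count.
    final EntityManager em = getEntityManagerProvider().get();
    final int updated = em
        .createQuery("UPDATE ClusterEntity SET provisioningState = :provisioningState")
        .setParameter("provisioningState", State.INSTALLED)
        .executeUpdate();
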
+ 71 - 0
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java

@@ -46,6 +46,7 @@ import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.KeyValueDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
@@ -58,6 +59,7 @@ import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity_;
+import org.apache.ambari.server.orm.entities.KeyValueEntity;
 import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.orm.entities.PrincipalEntity;
 import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
@@ -73,6 +75,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.alert.Scope;
+import org.apache.ambari.server.view.configuration.InstanceConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,6 +100,10 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
   private static final String ALERT_TABLE_GROUP_TARGET = "alert_group_target";
   private static final String ALERT_TABLE_GROUPING = "alert_grouping";
   private static final String ALERT_TABLE_NOTICE = "alert_notice";
+  public static final String JOBS_VIEW_NAME = "JOBS";
+  public static final String JOBS_VIEW_INSTANCE_NAME = "JOBS_1";
+  public static final String SHOW_JOBS_FOR_NON_ADMIN_KEY = "showJobsForNonAdmin";
+  public static final String JOBS_VIEW_INSTANCE_LABEL = "Jobs";
 
   //SourceVersion is only for book-keeping purpos
   @Override
@@ -561,6 +568,7 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
     addMissingConfigs();
     renamePigProperties();
     upgradePermissionModel();
+    addJobsViewPermissions();
   }
 
   /**
@@ -982,4 +990,67 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
       }
     }
   }
+
+  protected void addJobsViewPermissions() {
+
+    final UserDAO userDAO = injector.getInstance(UserDAO.class);
+    final ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
+    final ResourceDAO resourceDAO = injector.getInstance(ResourceDAO.class);
+    final ViewDAO viewDAO = injector.getInstance(ViewDAO.class);
+    final ViewInstanceDAO viewInstanceDAO = injector.getInstance(ViewInstanceDAO.class);
+    final KeyValueDAO keyValueDAO = injector.getInstance(KeyValueDAO.class);
+    final PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
+    final PrivilegeDAO privilegeDAO = injector.getInstance(PrivilegeDAO.class);
+
+    ViewEntity jobsView = viewDAO.findByCommonName(JOBS_VIEW_NAME);
+    if (jobsView != null) {
+      ViewInstanceEntity jobsInstance = jobsView.getInstanceDefinition(JOBS_VIEW_INSTANCE_NAME);
+      if (jobsInstance == null) {
+        jobsInstance = new ViewInstanceEntity(jobsView, JOBS_VIEW_INSTANCE_NAME, JOBS_VIEW_INSTANCE_LABEL);
+        ResourceEntity resourceEntity = new ResourceEntity();
+        resourceEntity.setResourceType(resourceTypeDAO.findByName(
+            ViewEntity.getViewName(
+                jobsView.getCommonName(),
+                jobsView.getVersion())));
+        jobsInstance.setResource(resourceEntity);
+        jobsView.addInstanceDefinition(jobsInstance);
+        resourceDAO.create(resourceEntity);
+        viewInstanceDAO.create(jobsInstance);
+        viewDAO.merge(jobsView);
+      }
+      // get showJobsForNonAdmin value and remove it
+      boolean showJobsForNonAdmin = false;
+      KeyValueEntity showJobsKeyValueEntity = keyValueDAO.findByKey(SHOW_JOBS_FOR_NON_ADMIN_KEY);
+      if (showJobsKeyValueEntity != null) {
+        String value = showJobsKeyValueEntity.getValue();
+        showJobsForNonAdmin = Boolean.parseBoolean(value);
+        keyValueDAO.remove(showJobsKeyValueEntity);
+      }
+      if (showJobsForNonAdmin) {
+        ResourceEntity jobsResource = jobsInstance.getResource();
+        PermissionEntity viewUsePermission = permissionDAO.findViewUsePermission();
+        for (UserEntity userEntity : userDAO.findAll()) {
+          // check if user has VIEW.USE privilege for JOBS view
+          List<PrivilegeEntity> privilegeEntities = privilegeDAO.findAllByPrincipal(
+              Collections.singletonList(userEntity.getPrincipal()));
+          boolean hasJobsUsePrivilege = false;
+          for (PrivilegeEntity privilegeEntity : privilegeEntities) {
+            if (privilegeEntity.getResource().getId() == jobsInstance.getResource().getId() &&
+                privilegeEntity.getPermission().getId() == viewUsePermission.getId()) {
+              hasJobsUsePrivilege = true;
+              break;
+            }
+          }
+          // if not - add VIEW.use privilege
+          if (!hasJobsUsePrivilege) {
+            PrivilegeEntity privilegeEntity = new PrivilegeEntity();
+            privilegeEntity.setResource(jobsResource);
+            privilegeEntity.setPermission(viewUsePermission);
+            privilegeEntity.setPrincipal(userEntity.getPrincipal());
+            privilegeDAO.create(privilegeEntity);
+          }
+        }
+      }
+    }
+  }
 }

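One thing to watch in addJobsViewPermissions() above: the privilege check compares entity ids with ==. If those getters return boxed wrapper types, as JPA entity ids commonly do (not verified here), == compares object references and only happens to hold inside the small-value autobox cache; equals() is the reliable comparison. A hypothetical, self-contained illustration independent of the Ambari entities:

    Integer small1 = 127,  small2 = 127;   // inside the Integer cache (-128..127)
    Integer large1 = 1000, large2 = 1000;  // outside the cache
    System.out.println(small1 == small2);      // true  - same cached object
    System.out.println(large1 == large2);      // false - distinct objects
    System.out.println(large1.equals(large2)); // true  - value comparison
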
+ 80 - 224
ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java

@@ -40,6 +40,8 @@ import java.util.Set;
 import java.util.jar.JarEntry;
 import java.util.jar.JarFile;
 
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
@@ -103,6 +105,7 @@ import com.google.inject.Injector;
 /**
  * Registry for view and view instance definitions.
  */
+@Singleton
 public class ViewRegistry {
 
   /**
@@ -144,7 +147,7 @@ public class ViewRegistry {
   /**
    * The singleton view registry instance.
    */
-  private static final ViewRegistry singleton = new ViewRegistry();
+  private static ViewRegistry singleton;
 
   /**
    * The logger.
@@ -154,50 +157,56 @@
   /**
    * View data access object.
    */
-  private static ViewDAO viewDAO;
+  @Inject
+  ViewDAO viewDAO;
 
   /**
    * View instance data access object.
    */
-  private static ViewInstanceDAO instanceDAO;
+  @Inject
+  ViewInstanceDAO instanceDAO;
 
   /**
    * User data access object.
    */
-  private static UserDAO userDAO;
+  @Inject
+  UserDAO userDAO;
 
   /**
    * Group member data access object.
    */
-  private static MemberDAO memberDAO;
+  @Inject
+  MemberDAO memberDAO;
 
   /**
    * Privilege data access object.
    */
-  private static PrivilegeDAO privilegeDAO;
+  @Inject
+  PrivilegeDAO privilegeDAO;
 
   /**
    * Helper with security related utilities.
    */
-  private static SecurityHelper securityHelper;
+  @Inject
+  SecurityHelper securityHelper;
 
   /**
    * Resource data access object.
    */
-  private static ResourceDAO resourceDAO;
+  @Inject
+  ResourceDAO resourceDAO;
 
   /**
    * Resource type data access object.
    */
-  private static ResourceTypeDAO resourceTypeDAO;
-
-  // ----- Constructors ------------------------------------------------------
+  @Inject
+  ResourceTypeDAO resourceTypeDAO;
 
   /**
-   * Hide the constructor for this singleton.
+   * Ambari configuration.
    */
-  private ViewRegistry() {
-  }
+  @Inject
+  Configuration configuration;
 
 
   // ----- ViewRegistry ------------------------------------------------------
@@ -327,6 +336,15 @@
     }
   }
 
+  /**
+   * Init the singleton instance.
+   *
+   * @param singleton  the view registry
+   */
+  public static void initInstance(ViewRegistry singleton) {
+    ViewRegistry.singleton = singleton;
+  }
+
   /**
    * Get the view registry singleton.
    *
@@ -469,11 +487,7 @@
 
         ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findByName(ViewEntity.getViewName(viewName, version));
         // create an admin resource to represent this view instance
-        ResourceEntity resourceEntity = new ResourceEntity();
-        resourceEntity.setResourceType(resourceTypeEntity);
-        resourceDAO.create(resourceEntity);
-
-        instanceEntity.setResource(resourceEntity);
+        instanceEntity.setResource(createViewInstanceResource(resourceTypeEntity));
 
         instanceDAO.merge(instanceEntity);
 
@@ -519,29 +533,8 @@
     ViewEntity viewEntity = getDefinition(instanceEntity.getViewName());
 
     if (viewEntity != null) {
-      String instanceName = instanceEntity.getName();
-      String viewName     = viewEntity.getCommonName();
-      String version      = viewEntity.getVersion();
-
-      ViewInstanceEntity entity = getInstanceDefinition(viewName, version, instanceName);
-
-      if (entity != null) {
-        if (entity.isXmlDriven()) {
-          throw new IllegalStateException("View instances defined via xml can't be updated through api requests");
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Updating view instance " + viewName + "/" +
-              version + "/" + instanceName);
-        }
-        entity.setLabel(instanceEntity.getLabel());
-        entity.setDescription(instanceEntity.getDescription());
-        entity.setVisible(instanceEntity.isVisible());
-        entity.setProperties(instanceEntity.getProperties());
-        entity.setData(instanceEntity.getData());
-
-        instanceEntity.validate(viewEntity);
-        instanceDAO.merge(entity);
-      }
+      instanceEntity.validate(viewEntity);
+      instanceDAO.merge(instanceEntity);
     }
   }
 
@@ -717,7 +710,8 @@
 
     ResourceEntity resourceEntity = instanceEntity == null ? null : instanceEntity.getResource();
 
-    return (resourceEntity == null && readOnly) || checkAuthorization(resourceEntity);
+    return !configuration.getApiAuthentication() ||
+        (resourceEntity == null && readOnly) || checkAuthorization(resourceEntity);
   }
 
   /**
@@ -725,35 +719,21 @@ public class ViewRegistry {
    * based on the permissions granted to the current user.
    *
    * @param definitionEntity  the view definition entity
-   * @param readOnly        indicate whether or not this is for a read only operation
    *
    * @return true if the view instance should be included based on the permissions of the current user
    */
   public boolean includeDefinition(ViewEntity definitionEntity) {
 
-    ViewRegistry viewRegistry = ViewRegistry.getInstance();
-
-    for (GrantedAuthority grantedAuthority : securityHelper.getCurrentAuthorities()) {
-      if (grantedAuthority instanceof AmbariGrantedAuthority) {
-
-        AmbariGrantedAuthority authority = (AmbariGrantedAuthority) grantedAuthority;
-        PrivilegeEntity privilegeEntity = authority.getPrivilegeEntity();
-        Integer permissionId = privilegeEntity.getPermission().getId();
-
-        // admin has full access
-        if (permissionId.equals(PermissionEntity.AMBARI_ADMIN_PERMISSION)) {
-          return true;
-        }
-      }
+    if (checkPermission(null, false)) {
+      return true;
     }
 
-    boolean allowed = false;
-
     for (ViewInstanceEntity instanceEntity: definitionEntity.getInstances()) {
-      allowed |= viewRegistry.checkPermission(instanceEntity, true);
+      if (checkPermission(instanceEntity, true) ) {
+        return true;
+      }
     }
-
-    return allowed;
+    return false;
   }
 
 
@@ -1044,16 +1024,18 @@ public class ViewRegistry {
    * Sync given view with data in DB. Ensures that view data in DB is updated,
    * all instances changes from xml config are reflected to DB
    *
-   * @param view view config from xml
-   * @param instanceDefinitions view instances from xml
-   * @throws Exception
+   * @param view                 view config from xml
+   * @param instanceDefinitions  view instances from xml
+   *
+   * @throws Exception if the view can not be synced
    */
   private void syncView(ViewEntity view,
                         Set<ViewInstanceEntity> instanceDefinitions)
       throws Exception {
-    String viewName = view.getName();
 
-    ViewEntity persistedView = viewDAO.findByName(viewName);
+    String             viewName      = view.getName();
+    ViewEntity         persistedView = viewDAO.findByName(viewName);
+    ResourceTypeEntity resourceType  = view.getResourceType();
 
     // if the view is not yet persisted ...
     if (persistedView == null) {
@@ -1064,29 +1046,15 @@ public class ViewRegistry {
       // get or create an admin resource type to represent this view
       ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findByName(viewName);
       if (resourceTypeEntity == null) {
-        resourceTypeEntity = view.getResourceType();
+        resourceTypeEntity = resourceType;
         resourceTypeDAO.create(resourceTypeEntity);
       }
 
       for( ViewInstanceEntity instance : view.getInstances()) {
-
-        // create an admin resource to represent this view instance
-        ResourceEntity resourceEntity = new ResourceEntity();
-        resourceEntity.setResourceType(view.getResourceType());
-        resourceDAO.create(resourceEntity);
-
-        instance.setResource(resourceEntity);
-      }
-      // ... merge it
-      viewDAO.merge(view);
-
-      persistedView = viewDAO.findByName(viewName);
-      if (persistedView == null) {
-        String message = "View  " + viewName + " can not be found.";
-
-        LOG.error(message);
-        throw new IllegalStateException(message);
+        instance.setResource(createViewInstanceResource(resourceType));
       }
+      // ... merge the view
+      persistedView = viewDAO.merge(view);
     }
 
     Map<String, ViewInstanceEntity> xmlInstanceEntityMap = new HashMap<String, ViewInstanceEntity>();
@@ -1099,60 +1067,44 @@
 
     // make sure that each instance of the view in the db is reflected in the given view
     for (ViewInstanceEntity persistedInstance : persistedView.getInstances()){
-      String instanceName = persistedInstance.getName();
 
-      ViewInstanceEntity instance =
-          view.getInstanceDefinition(instanceName);
+      String             instanceName = persistedInstance.getName();
+      ViewInstanceEntity instance     = view.getInstanceDefinition(instanceName);
 
-      if (persistedInstance.isXmlDriven() && !xmlInstanceEntityMap.containsKey(instanceName)) {
-        instanceDAO.remove(persistedInstance);
-        xmlInstanceEntityMap.remove(instanceName);
-        continue;
-      }
       xmlInstanceEntityMap.remove(instanceName);
 
-      // if the persisted instance is not in the registry ...
+      // if the persisted instance is not in the view ...
       if (instance == null) {
-        instance = new ViewInstanceEntity(view, instanceName);
-        bindViewInstance(view, instance);
-        instanceDefinitions.add(instance);
-      }
-      instance.setViewInstanceId(persistedInstance.getViewInstanceId());
-
-      if (instance.isXmlDriven()) {
-        // override db data with data from {@InstanceConfig}
-        persistedInstance.setLabel(instance.getLabel());
-        persistedInstance.setDescription(instance.getDescription());
-        persistedInstance.setVisible(instance.isVisible());
-        persistedInstance.setIcon(instance.getIcon());
-        persistedInstance.setIcon64(instance.getIcon64());
-        persistedInstance.setProperties(instance.getProperties());
-
-        instanceDAO.merge(persistedInstance);
+        if (persistedInstance.isXmlDriven()) {
+          // this instance was persisted from an earlier view.xml but has been removed...
+          // remove it from the db
+          instanceDAO.remove(persistedInstance);
+        } else {
+          // this instance was not specified in the view.xml but was added through the API...
+          // bind it to the view and add it to the registry
+          instanceDAO.merge(persistedInstance);
+          bindViewInstance(view, persistedInstance);
+          instanceDefinitions.add(persistedInstance);
+        }
       } else {
-        // apply the persisted overrides to the in-memory instance
-        view.removeInstanceDefinition(instanceName);
-        view.addInstanceDefinition(persistedInstance);
+        instance.setResource(persistedInstance.getResource());
       }
-
-      instance.setResource(persistedInstance.getResource());
     }
 
-    // these instances appear in the archive but not present in the db... add
-    // them to db and registry
+    // these instances appear in the view.xml but are not present in the db...
+    // add them to db
     for (ViewInstanceEntity instance : xmlInstanceEntityMap.values()) {
     for (ViewInstanceEntity instance : xmlInstanceEntityMap.values()) {
-      // create an admin resource to represent this view instance
-      ResourceEntity resourceEntity = new ResourceEntity();
-      resourceEntity.setResourceType(view.getResourceType());
-      resourceDAO.create(resourceEntity);
-      instance.setResource(resourceEntity);
-
+      instance.setResource(createViewInstanceResource(resourceType));
       instanceDAO.merge(instance);
-      bindViewInstance(view, instance);
-      instanceDefinitions.add(instance);
     }
+  }
 
 
+  // create an admin resource to represent a view instance
+  private ResourceEntity createViewInstanceResource(ResourceTypeEntity resourceTypeEntity) {
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setResourceType(resourceTypeEntity);
+    resourceDAO.create(resourceEntity);
+    return resourceEntity;
   }
   }
 
   // ensure that the extracted view archive directory exists
@@ -1272,7 +1224,7 @@ public class ViewRegistry {
       if (grantedAuthority instanceof AmbariGrantedAuthority) {
 
         AmbariGrantedAuthority authority       = (AmbariGrantedAuthority) grantedAuthority;
-        PrivilegeEntity privilegeEntity = authority.getPrivilegeEntity();
+        PrivilegeEntity        privilegeEntity = authority.getPrivilegeEntity();
         Integer                permissionId    = privilegeEntity.getPermission().getId();
 
         // admin has full access
@@ -1293,102 +1245,6 @@ public class ViewRegistry {
     return false;
   }
 
-  /**
-   * Static initialization of DAO.
-   *
-   * @param viewDAO         view data access object
-   * @param instanceDAO     view instance data access object
-   * @param userDAO         user data access object
-   * @param memberDAO       group member data access object
-   * @param privilegeDAO    the privilege data access object
-   * @param securityHelper  the security helper
-   */
-  public static void init(ViewDAO viewDAO, ViewInstanceDAO instanceDAO,
-                          UserDAO userDAO, MemberDAO memberDAO, PrivilegeDAO privilegeDAO,
-                          SecurityHelper securityHelper, ResourceDAO resourceDAO,
-                          ResourceTypeDAO resourceTypeDAO) {
-    setViewDAO(viewDAO);
-    setInstanceDAO(instanceDAO);
-    setUserDAO(userDAO);
-    setMemberDAO(memberDAO);
-    setPrivilegeDAO(privilegeDAO);
-    setSecurityHelper(securityHelper);
-    setResourceDAO(resourceDAO);
-    setResourceTypeDAO(resourceTypeDAO);
-  }
-
-  /**
-   * Set the view DAO.
-   *
-   * @param viewDAO  the view DAO
-   */
-  protected static void setViewDAO(ViewDAO viewDAO) {
-    ViewRegistry.viewDAO = viewDAO;
-  }
-
-  /**
-   * Set the instance DAO.
-   *
-   * @param instanceDAO  the instance DAO
-   */
-  protected static void setInstanceDAO(ViewInstanceDAO instanceDAO) {
-    ViewRegistry.instanceDAO = instanceDAO;
-  }
-
-  /**
-   * Set the user DAO.
-   *
-   * @param userDAO  the user DAO
-   */
-  protected static void setUserDAO(UserDAO userDAO) {
-    ViewRegistry.userDAO = userDAO;
-  }
-
-  /**
-   * Set the group member DAO.
-   *
-   * @param memberDAO  the group member DAO
-   */
-  protected static void setMemberDAO(MemberDAO memberDAO) {
-    ViewRegistry.memberDAO = memberDAO;
-  }
-
-  /**
-   * Set the privilege DAO.
-   *
-   * @param privilegeDAO  the privilege DAO
-   */
-  protected static void setPrivilegeDAO(PrivilegeDAO privilegeDAO) {
-    ViewRegistry.privilegeDAO = privilegeDAO;
-  }
-
-  /**
-   * Set the security helper.
-   *
-   * @param securityHelper  the security helper
-   */
-  protected static void setSecurityHelper(SecurityHelper securityHelper) {
-    ViewRegistry.securityHelper = securityHelper;
-  }
-
-  /**
-   * Set the resource DAO.
-   *
-   * @param resourceDAO the resource DAO
-   */
-  protected static void setResourceDAO(ResourceDAO resourceDAO) {
-    ViewRegistry.resourceDAO = resourceDAO;
-  }
-
-  /**
-   * Set the resource type DAO.
-   *
-   * @param resourceTypeDAO the resource type DAO.
-   */
-  protected static void setResourceTypeDAO(ResourceTypeDAO resourceTypeDAO) {
-    ViewRegistry.resourceTypeDAO = resourceTypeDAO;
-  }
-
 
   // ----- inner class : ViewRegistryHelper ----------------------------------
 

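The ViewRegistry changes above drop the eagerly constructed singleton and the static init(...)/set*DAO(...) bootstrap in favor of Guice field injection (@Inject on the DAOs, @Singleton on the class), with initInstance() as the bridge that keeps existing ViewRegistry.getInstance() callers working. A minimal wiring sketch under those assumptions; the module is a stand-in, since the actual server wiring is not part of this diff:

    // Let Guice build the registry (injecting its DAOs), then publish it
    // for legacy static lookups.
    Injector injector = Guice.createInjector(/* server modules, assumed */);
    ViewRegistry registry = injector.getInstance(ViewRegistry.class);
    ViewRegistry.initInstance(registry);
    assert ViewRegistry.getInstance() == registry;
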
+ 10 - 5
ambari-server/src/main/python/ambari-server.py

@@ -250,7 +250,7 @@ PG_STATUS_RUNNING = utils.get_postgre_running_status(OS_TYPE)
 PG_DEFAULT_PASSWORD = "bigdata"
 SERVICE_CMD = "/usr/bin/env service"
 PG_SERVICE_NAME = "postgresql"
-PG_HBA_DIR = utils.get_postgre_hba_dir(OS_TYPE)
+PG_HBA_DIR = utils.get_postgre_hba_dir(OS_FAMILY)
 
 PG_ST_CMD = "%s %s status" % (SERVICE_CMD, PG_SERVICE_NAME)
 if os.path.isfile("/usr/bin/postgresql-setup"):
@@ -2632,7 +2632,14 @@ def change_objects_owner(args):
 
   command = CHANGE_OWNER_COMMAND[:]
   command[-1] = command[-1].format(database_name, 'ambari', new_owner)
-  return run_os_command(command)
+  retcode, stdout, stderr = run_os_command(command)
+  if not retcode == 0:
+    if VERBOSE:
+      if stdout:
+        print_error_msg(stdout.strip())
+      if stderr:
+        print_error_msg(stderr.strip())
+    raise FatalException(20, 'Unable to change owner of database objects')
 
 
 def compare_versions(version1, version2):
@@ -2737,9 +2744,7 @@ def upgrade(args):
   parse_properties_file(args)
   #TODO check database version
   if args.persistence_type == 'local':
-    retcode, stdout, stderr = change_objects_owner(args)
-    if not retcode == 0:
-      raise FatalException(20, 'Unable to change owner of database objects')
+    change_objects_owner(args)
 
   retcode = run_schema_upgrade()
   if not retcode == 0:

+ 45 - 14
ambari-server/src/main/python/ambari_server/utils.py

@@ -21,12 +21,16 @@ import os
 import signal
 import sys
 import time
+import glob
+import subprocess
 from ambari_commons import OSConst
 
-#PostgreSQL settings
-UBUNTU_PG_HBA_ROOT = "/etc/postgresql"
-PG_HBA_ROOT_DEFAULT = "/var/lib/pgsql/data"
+# PostgreSQL settings
 PG_STATUS_RUNNING_DEFAULT = "running"
+PG_HBA_ROOT_DEFAULT = "/var/lib/pgsql/data"
+PG_HBA_INIT_FILES = {'debian': '/etc/postgresql',
+                     'redhat': '/etc/rc.d/init.d/postgresql',
+                     'suse': '/etc/init.d/postgresql'}
 
 #Environment
 ENV_PATH_DEFAULT = ['/bin', '/usr/bin', '/sbin', '/usr/sbin']  # default search path
@@ -167,25 +171,52 @@ def get_ubuntu_pg_version():
   """
   postgre_ver = ""
 
-  if os.path.isdir(UBUNTU_PG_HBA_ROOT):  # detect actual installed versions of PG and select a more new one
+  if os.path.isdir(PG_HBA_INIT_FILES[
+    'debian']):  # detect actual installed versions of PG and select a more new one
     postgre_ver = sorted(
     postgre_ver = sorted(
-    [fld for fld in os.listdir(UBUNTU_PG_HBA_ROOT) if os.path.isdir(os.path.join(UBUNTU_PG_HBA_ROOT, fld))], reverse=True)
+      [fld for fld in os.listdir(PG_HBA_INIT_FILES[OSConst.DEBIAN_FAMILY]) if
+       os.path.isdir(os.path.join(PG_HBA_INIT_FILES[OSConst.DEBIAN_FAMILY], fld))],
+      reverse=True)
     if len(postgre_ver) > 0:
       return postgre_ver[0]
   return postgre_ver
 
 
-def get_postgre_hba_dir(OS):
-  """Return postgre hba dir location depends on OS"""
-  if OS == OSConst.OS_UBUNTU:
-    return os.path.join(UBUNTU_PG_HBA_ROOT, get_ubuntu_pg_version(), "main")
+def get_postgre_hba_dir(OS_FAMILY):
+  """Return postgre hba dir location depends on OS.
+  Also depends on version of postgres creates symlink like postgresql-->postgresql-9.3
+  1) /etc/rc.d/init.d/postgresql --> /etc/rc.d/init.d/postgresql-9.3
+  2) /etc/init.d/postgresql --> /etc/init.d/postgresql-9.1
+  """
+  if OS_FAMILY == OSConst.DEBIAN_FAMILY:
+    # Like: /etc/postgresql/9.1/main/
+    return os.path.join(PG_HBA_INIT_FILES[OS_FAMILY], get_ubuntu_pg_version(),
+                        "main")
   else:
   else:
-    return PG_HBA_ROOT_DEFAULT
-
-
-def get_postgre_running_status(OS):
+    if not os.path.isfile(PG_HBA_INIT_FILES[OS_FAMILY]):
+      # Link: /etc/init.d/postgresql --> /etc/init.d/postgresql-9.1
+      os.symlink(glob.glob(PG_HBA_INIT_FILES[OS_FAMILY] + '*')[0],
+                 PG_HBA_INIT_FILES[OS_FAMILY])
+
+    # Get postgres_data location (default: /var/lib/pgsql/data)
+    cmd = "alias exit=return; source " + PG_HBA_INIT_FILES[
+      OS_FAMILY] + " status &>/dev/null; echo $PGDATA"
+    p = subprocess.Popen(cmd,
+                         stdout=subprocess.PIPE,
+                         stdin=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         shell=True)
+    (PG_HBA_ROOT, err) = p.communicate()
+
+    if PG_HBA_ROOT and len(PG_HBA_ROOT.strip()) > 0:
+      return PG_HBA_ROOT.strip()
+    else:
+      return PG_HBA_ROOT_DEFAULT
+
+
+def get_postgre_running_status(OS_FAMILY):
   """Return postgre running status indicator"""
-  if OS == OSConst.OS_UBUNTU:
+  if OS_FAMILY == OSConst.DEBIAN_FAMILY:
     return os.path.join(get_ubuntu_pg_version(), "main")
   else:
     return PG_STATUS_RUNNING_DEFAULT

+ 3 - 1
ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql

@@ -28,7 +28,8 @@ delimiter ;
 
 CREATE TABLE clusters (cluster_id BIGINT NOT NULL, resource_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, provisioning_state VARCHAR(255) NOT NULL DEFAULT 'INIT', desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
 CREATE TABLE clusterconfig (config_id BIGINT NOT NULL, version_tag VARCHAR(255) NOT NULL, version BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data LONGTEXT NOT NULL, config_attributes LONGTEXT, create_timestamp BIGINT NOT NULL, PRIMARY KEY (config_id));
-CREATE TABLE serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', note LONGTEXT, PRIMARY KEY (service_config_id));
+CREATE TABLE serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', group_id BIGINT, note LONGTEXT, PRIMARY KEY (service_config_id));
+CREATE TABLE serviceconfighosts (service_config_id BIGINT NOT NULL, hostname VARCHAR(255), PRIMARY KEY(service_config_id, hostname));
 CREATE TABLE serviceconfigmapping (service_config_id BIGINT NOT NULL, config_id BIGINT NOT NULL, PRIMARY KEY(service_config_id, config_id));
 CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
 CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
@@ -119,6 +120,7 @@ ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_cluster_id FOREI
 ALTER TABLE hostconfigmapping ADD CONSTRAINT FK_hostconfmapping_host_name FOREIGN KEY (host_name) REFERENCES hosts (host_name);
 ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
 ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT  FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
 ALTER TABLE configgroup ADD CONSTRAINT FK_configgroup_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
 ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_confg FOREIGN KEY (cluster_id, config_type, version_tag) REFERENCES clusterconfig (cluster_id, type_name, version_tag);
 ALTER TABLE confgroupclusterconfigmapping ADD CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES configgroup (group_id);

+ 3 - 1
ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql

@@ -19,7 +19,8 @@
 ------create tables---------
 CREATE TABLE clusters (cluster_id NUMBER(19) NOT NULL, resource_id NUMBER(19) NOT NULL, cluster_info VARCHAR2(255) NULL, cluster_name VARCHAR2(100) NOT NULL UNIQUE, provisioning_state VARCHAR2(255) DEFAULT 'INIT' NOT NULL, desired_cluster_state VARCHAR2(255) NULL, desired_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
 CREATE TABLE clusterconfig (config_id NUMBER(19) NOT NULL, version_tag VARCHAR2(255) NOT NULL, version NUMBER(19) NOT NULL, type_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_data CLOB NOT NULL, config_attributes CLOB, create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY (config_id));
-CREATE TABLE serviceconfig (service_config_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_name VARCHAR(255) NOT NULL, version NUMBER(19) NOT NULL, create_timestamp NUMBER(19) NOT NULL, user_name VARCHAR(255) DEFAULT '_db' NOT NULL, note CLOB, PRIMARY KEY (service_config_id));
+CREATE TABLE serviceconfig (service_config_id NUMBER(19) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_name VARCHAR(255) NOT NULL, version NUMBER(19) NOT NULL, create_timestamp NUMBER(19) NOT NULL, user_name VARCHAR(255) DEFAULT '_db' NOT NULL, group_id NUMBER(19), note CLOB, PRIMARY KEY (service_config_id));
+CREATE TABLE serviceconfighosts (service_config_id NUMBER(19) NOT NULL, hostname VARCHAR(255), PRIMARY KEY(service_config_id, hostname));
 CREATE TABLE serviceconfigmapping (service_config_id NUMBER(19) NOT NULL, config_id NUMBER(19) NOT NULL, PRIMARY KEY(service_config_id, config_id));
 CREATE TABLE clusterservices (service_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_enabled NUMBER(10) NOT NULL, PRIMARY KEY (service_name, cluster_id));
 CREATE TABLE clusterstate (cluster_id NUMBER(19) NOT NULL, current_cluster_state VARCHAR2(255) NULL, current_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
@@ -86,6 +87,7 @@ ALTER TABLE adminpermission ADD CONSTRAINT UQ_perm_name_resource_type_id UNIQUE
 ALTER TABLE members ADD CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id);
 ALTER TABLE members ADD CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
 ALTER TABLE clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT  FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
 ALTER TABLE clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
 ALTER TABLE clusterconfigmapping ADD CONSTRAINT clusterconfigmappingcluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);
 ALTER TABLE clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id);

+ 4 - 1
ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql

@@ -23,7 +23,9 @@ CREATE TABLE clusterconfig (config_id BIGINT NOT NULL, version_tag VARCHAR(255)
 
 CREATE TABLE clusterconfigmapping (cluster_id BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, type_name, create_timestamp));
 
-CREATE TABLE serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', note TEXT, PRIMARY KEY (service_config_id));
+CREATE TABLE serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', group_id BIGINT, note TEXT, PRIMARY KEY (service_config_id));
+
+CREATE TABLE serviceconfighosts (service_config_id BIGINT NOT NULL, hostname VARCHAR(255), PRIMARY KEY(service_config_id, hostname));
 
 CREATE TABLE serviceconfigmapping (service_config_id BIGINT NOT NULL, config_id BIGINT NOT NULL, PRIMARY KEY(service_config_id, config_id));
 
@@ -171,6 +173,7 @@ ALTER TABLE users ADD CONSTRAINT FK_users_principal_id FOREIGN KEY (principal_id
 ALTER TABLE groups ADD CONSTRAINT FK_groups_principal_id FOREIGN KEY (principal_id) REFERENCES adminprincipal(principal_id);
 ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
 ALTER TABLE serviceconfigmapping ADD CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES clusterconfig(config_id);
+ALTER TABLE serviceconfighosts ADD CONSTRAINT  FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES serviceconfig(service_config_id);
 ALTER TABLE clusters ADD CONSTRAINT FK_clusters_resource_id FOREIGN KEY (resource_id) REFERENCES adminresource(resource_id);
 
 -- Alerting Framework

+ 7 - 3
ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql

@@ -37,9 +37,12 @@ GRANT ALL PRIVILEGES ON TABLE ambari.clusterconfig TO :username;
 CREATE TABLE ambari.clusterconfigmapping (cluster_id BIGINT NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, type_name, create_timestamp));
 GRANT ALL PRIVILEGES ON TABLE ambari.clusterconfigmapping TO :username;
 
-CREATE TABLE ambari.serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', note TEXT, PRIMARY KEY (service_config_id));
+CREATE TABLE ambari.serviceconfig (service_config_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, version BIGINT NOT NULL, create_timestamp BIGINT NOT NULL, user_name VARCHAR(255) NOT NULL DEFAULT '_db', group_id BIGINT, note TEXT, PRIMARY KEY (service_config_id));
 GRANT ALL PRIVILEGES ON TABLE ambari.serviceconfig TO :username;
 
+CREATE TABLE ambari.serviceconfighosts (service_config_id BIGINT NOT NULL, hostname VARCHAR(255), PRIMARY KEY(service_config_id, hostname));
+GRANT ALL PRIVILEGES ON TABLE ambari.serviceconfighosts TO :username;
+
 CREATE TABLE ambari.serviceconfigmapping (service_config_id BIGINT NOT NULL, config_id BIGINT NOT NULL, PRIMARY KEY(service_config_id, config_id));
 GRANT ALL PRIVILEGES ON TABLE ambari.serviceconfigmapping TO :username;
 
@@ -181,8 +184,8 @@ ALTER TABLE ambari.serviceconfig ADD CONSTRAINT UQ_scv_service_version UNIQUE (c
 ALTER TABLE ambari.adminpermission ADD CONSTRAINT UQ_perm_name_resource_type_id UNIQUE (permission_name, resource_type_id);
 
 --------altering tables by creating foreign keys----------
-ALTER TABLE members ADD CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES groups (group_id);
-ALTER TABLE members ADD CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES users (user_id);
+ALTER TABLE ambari.members ADD CONSTRAINT FK_members_group_id FOREIGN KEY (group_id) REFERENCES ambari.groups (group_id);
+ALTER TABLE ambari.members ADD CONSTRAINT FK_members_user_id FOREIGN KEY (user_id) REFERENCES ambari.users (user_id);
 ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
 ALTER TABLE ambari.clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
 ALTER TABLE ambari.clusterconfigmapping ADD CONSTRAINT clusterconfigmappingcluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
@@ -226,6 +229,7 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confg FOREIGN
 ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_cgccm_gid FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.serviceconfigmapping ADD CONSTRAINT FK_scvm_scv FOREIGN KEY (service_config_id) REFERENCES ambari.serviceconfig(service_config_id);
 ALTER TABLE ambari.serviceconfigmapping ADD CONSTRAINT FK_scvm_config FOREIGN KEY (config_id) REFERENCES ambari.clusterconfig(config_id);
+ALTER TABLE ambari.serviceconfighosts ADD CONSTRAINT  FK_scvhosts_scv FOREIGN KEY (service_config_id) REFERENCES ambari.serviceconfig(service_config_id);
 ALTER TABLE ambari.adminresource ADD CONSTRAINT FK_resource_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES ambari.adminresourcetype(resource_type_id);
 ALTER TABLE ambari.adminprincipal ADD CONSTRAINT FK_principal_principal_type_id FOREIGN KEY (principal_type_id) REFERENCES ambari.adminprincipaltype(principal_type_id);
 ALTER TABLE ambari.adminpermission ADD CONSTRAINT FK_permission_resource_type_id FOREIGN KEY (resource_type_id) REFERENCES ambari.adminresourcetype(resource_type_id);

+ 3 - 0
ambari-server/src/main/resources/properties.json

@@ -88,6 +88,9 @@
     "ServiceConfigVersion":[
     "ServiceConfigVersion":[
         "ServiceConfigVersion/cluster_name",
         "ServiceConfigVersion/cluster_name",
         "ServiceConfigVersion/service_name",
         "ServiceConfigVersion/service_name",
+        "ServiceConfigVersion/group_id",
+        "ServiceConfigVersion/group_name",
+        "ServiceConfigVersion/hosts",
         "ServiceConfigVersion/serviceconfigversion",
         "ServiceConfigVersion/serviceconfigversion",
         "ServiceConfigVersion/createtime",
         "ServiceConfigVersion/createtime",
         "ServiceConfigVersion/user",
         "ServiceConfigVersion/user",

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py

@@ -80,6 +80,7 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 zk_user = config['configurations']['zookeeper-env']['zk_user']
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
 user_group = config['configurations']['hadoop-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
@@ -103,6 +104,7 @@ namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 
+has_sqoop_client = 'sqoop-env' in config['configurations']
 has_resourcemanager = not len(rm_host) == 0
 has_namenode = not len(namenode_host) == 0
 has_jt = not len(jtnode_host) == 0
@@ -130,4 +132,4 @@ ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsuse
 
 #repo params
 repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
+service_repo_info = default("/hostLevelParams/service_repo_info",None)

+ 7 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -124,6 +124,13 @@ def setup_users():
          ignore_failures = params.ignore_groupsusers_create
     )
 
+  if params.has_sqoop_client:
+    User(params.sqoop_user,
+         gid=params.user_group,
+         groups=[params.user_group],
+         ignore_failures=params.ignore_groupsusers_create
+    )
+
 def set_uid(user, user_dirs):
   """
   user_dirs - comma separated directories
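
The two hunks above work as a pair: params.py derives has_sqoop_client and sqoop_user from the command JSON, and setup_users() only declares the user when the flag is set. A minimal sketch with a plain dict standing in for Ambari's config object and a print in place of the User resource:

def setup_users(config):
    # guard first: indexing 'sqoop-env' directly would raise KeyError on
    # clusters that do not carry the config type
    has_sqoop_client = 'sqoop-env' in config['configurations']
    if has_sqoop_client:
        sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
        print("would create user %s" % sqoop_user)  # real code declares User(...)

setup_users({'configurations': {'sqoop-env': {'sqoop_user': 'sqoop'}}})
setup_users({'configurations': {}})  # no sqoop-env: no user, and no KeyError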

+ 1 - 4
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -24,10 +24,7 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-if System.get_instance().os_type == "oraclelinux":
-  ulimit_cmd = ''
-else:
-  ulimit_cmd = "ulimit -c unlimited; "
+ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']

+ 2 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py

@@ -31,7 +31,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     "ls {pid_file} >/dev/null 2>&1 &&"
     "ls {pid_file} >/dev/null 2>&1 &&"
     " ps `cat {pid_file}` >/dev/null 2>&1")
     " ps `cat {pid_file}` >/dev/null 2>&1")
   hadoop_daemon = format(
   hadoop_daemon = format(
-    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
     "{hadoop_bin}/hadoop-daemon.sh")
     "{hadoop_bin}/hadoop-daemon.sh")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
 
@@ -49,7 +49,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
       pid_file = format(
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
 
-  daemon_cmd = format("{cmd} {action} {name}")
+  daemon_cmd = format("{ulimit_cmd} su - {user} -c '{cmd} {action} {name}'")
 
 
   service_is_up = check_process if action == "start" else None
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
   #remove pid file from dead process
@@ -58,7 +58,6 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
        not_if=check_process,
        not_if=check_process,
   )
   )
   Execute(daemon_cmd,
   Execute(daemon_cmd,
-          user = user,
           not_if=service_is_up
           not_if=service_is_up
   )
   )
   if action == "stop":
   if action == "stop":

+ 22 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml

@@ -173,6 +173,28 @@
             <script>scripts/hcat_client.py</script>
             <scriptType>PYTHON</scriptType>
           </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
         </component>
       </components>
       <osSpecifics>

+ 31 - 22
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig.properties.j2 → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-properties.xml

@@ -1,24 +1,31 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
 
+<configuration supports_final="false">
 
-#
-#
+  <property>
+    <name>content</name>
+    <description>Describe all the Pig agent configurations</description>
+    <value>
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -35,9 +42,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-#
-#
-#
+
 
 # Pig configuration file. All values can be overwritten by command line arguments.
 
@@ -72,3 +77,7 @@
 #using more counter than hadoop configured limit
 #pig.disable.counter=true
 hcat.bin=/usr/bin/hcat
+    </value>
+  </property>
+
+</configuration>
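
With the move to a *-properties.xml config type, the whole pig.properties payload lives in a single 'content' property. A hedged helper showing how such a blob can be pulled back out with the standard library (the file path is illustrative; Ambari itself loads these through its stack definitions, not this way):

import xml.etree.ElementTree as ET

def read_content(path):
    # find the <property> whose <name> is 'content' and return its <value>
    root = ET.parse(path).getroot()
    for prop in root.findall("property"):
        if prop.findtext("name") == "content":
            return prop.findtext("value")
    return None

print(read_content("pig-properties.xml"))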

+ 0 - 52
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig.properties

@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

+ 7 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml

@@ -42,7 +42,12 @@
               <type>env</type>
               <fileName>pig-log4j.properties</fileName>
               <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>            
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>pig.properties</fileName>
+              <dictionaryName>pig-properties</dictionaryName>
+            </configFile>
           </configFiles>
         </component>
       </components>
@@ -70,6 +75,7 @@
       <configuration-dependencies>
         <config-type>pig-env</config-type>
         <config-type>pig-log4j</config-type>
+        <config-type>pig-properties</config-type>
       </configuration-dependencies>
 
     </service>

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py

@@ -45,3 +45,5 @@ if (('pig-log4j' in config['configurations']) and ('content' in config['configur
   log4j_props = config['configurations']['pig-log4j']['content']
 else:
   log4j_props = None
+
+pig_properties = config['configurations']['pig-properties']['content']

+ 8 - 16
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py

@@ -34,9 +34,14 @@ def pig():
        owner=params.hdfs_user,
        content=InlineTemplate(params.pig_env_sh_template)
   )
-  
-  pig_TemplateConfig( ['pig.properties'])
-  
+
+  File(format("{params.pig_conf_dir}/pig.properties"),
+       mode=0644,
+       group=params.user_group,
+       owner=params.hdfs_user,
+       content=params.pig_properties
+  )
+
   if (params.log4j_props != None):
     File(format("{params.pig_conf_dir}/log4j.properties"),
          mode=0644,
@@ -50,16 +55,3 @@ def pig():
          group=params.user_group,
          owner=params.hdfs_user
     )
-
-
-def pig_TemplateConfig(name):
-  import params
-
-  if not isinstance(name, list):
-    name = [name]
-
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-
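
The template-driven pig_TemplateConfig helper is gone; the file is now materialized directly from the dictionary content. A rough standard-library equivalent of that File resource call, with owner/group/mode mirroring the hunk and paths left illustrative:

import grp
import os
import pwd

def write_pig_properties(conf_dir, content, owner, group):
    path = os.path.join(conf_dir, "pig.properties")
    with open(path, "w") as f:
        f.write(content)
    os.chmod(path, 0o644)  # matches mode=0644 in the resource
    os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)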

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/configuration/sqoop-env.xml

@@ -45,5 +45,10 @@ export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
 export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
     </value>
   </property>
+  <property>
+    <name>sqoop_user</name>
+    <description>User to run Sqoop as</description>
+    <value>sqoop</value>
+  </property>
   
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py

@@ -31,7 +31,7 @@ hbase_home = "/usr"
 hive_home = "/usr"
 hive_home = "/usr"
 zoo_conf_dir = "/etc/zookeeper"
 zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
 
 keytab_path = config['configurations']['hadoop-env']['keytab_path']
 keytab_path = config['configurations']['hadoop-env']['keytab_path']
 smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py

@@ -41,6 +41,7 @@ gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 storm_user = config['configurations']['storm-env']['storm_user']
 tez_user = config['configurations']['tez-env']['tez_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
 user_group = config['configurations']['hadoop-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
@@ -66,6 +67,7 @@ ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
 storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
 falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
 
+has_sqoop_client = 'sqoop-env' in config['configurations']
 has_namenode = not len(namenode_host) == 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0

+ 8 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -144,13 +144,20 @@ def setup_users():
          ignore_failures = params.ignore_groupsusers_create
     )
     
-  if params.has_tez:  
+  if params.has_tez:
     User(params.tez_user,
       gid=params.user_group,
       groups=[params.proxyuser_group],
       ignore_failures = params.ignore_groupsusers_create
     )
 
+  if params.has_sqoop_client:
+    User(params.sqoop_user,
+         gid=params.user_group,
+         groups=[params.user_group],
+         ignore_failures=params.ignore_groupsusers_create
+    )
+
 def set_uid(user, user_dirs):
   """
   user_dirs - comma separated directories

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py

@@ -29,10 +29,10 @@ def write_function(path, handle, interval):
           handle.flush()
           time.sleep(interval)
           
-thread = Thread(target =  write_function, args = ('balancer.log', sys.stdout, 1))
+thread = Thread(target =  write_function, args = ('balancer.log', sys.stdout, 1.5))
 thread.start()
 
-threaderr = Thread(target =  write_function, args = ('balancer-err.log', sys.stderr, 0.3))
+threaderr = Thread(target =  write_function, args = ('balancer-err.log', sys.stderr, 1.5 * 0.023))
 threaderr.start()
 
 thread.join()  
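
For context, the emulator drives two writer threads at different cadences; this change only retunes the intervals (stdout every 1.5 s, stderr roughly every 35 ms). A self-contained sketch of the shape of that code; the body of write_function is an assumption, since the hunk shows only its tail:

import sys
import time
from threading import Thread

def write_function(path, handle, interval):
    for i in range(3):  # the real emulator replays a recorded balancer log
        handle.write("%s: line %d\n" % (path, i))
        handle.flush()
        time.sleep(interval)

thread = Thread(target=write_function, args=('balancer.log', sys.stdout, 1.5))
threaderr = Thread(target=write_function, args=('balancer-err.log', sys.stderr, 1.5 * 0.023))
thread.start()
threaderr.start()
thread.join()
threaderr.join()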

+ 12 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py

@@ -101,11 +101,13 @@ class NameNode(Script):
     _print("Executing command %s\n" % command)
     _print("Executing command %s\n" % command)
     
     
     parser = hdfs_rebalance.HdfsParser()
     parser = hdfs_rebalance.HdfsParser()
-    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                          shell=False,
-                          close_fds=True,
-                          cwd=basedir
-                          )
+    proc = subprocess.Popen(
+                            command, 
+                            stdout=subprocess.PIPE, 
+                            shell=False,
+                            close_fds=True,
+                            cwd=basedir
+                           )
     for line in iter(proc.stdout.readline, ''):
     for line in iter(proc.stdout.readline, ''):
       _print('[balancer] %s %s' % (str(datetime.now()), line ))
       _print('[balancer] %s %s' % (str(datetime.now()), line ))
       pl = parser.parseLine(line)
       pl = parser.parseLine(line)
@@ -118,7 +120,11 @@ class NameNode(Script):
         _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
         _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
         self.put_structured_out({'completePercent' : 1})
         self.put_structured_out({'completePercent' : 1})
         break
         break
-      
+    
+    proc.stdout.close()
+    proc.wait()
+    if proc.returncode != None and proc.returncode != 0:
+      raise Fail('Hdfs rebalance process exited with error. See the log output')
       
       
 def _print(line):
 def _print(line):
   sys.stdout.write(line)
   sys.stdout.write(line)
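
Two fixes land here: stderr is no longer captured into a pipe that nothing drains (a deadlock risk once the pipe buffer fills), and the balancer's exit code is now checked after the stdout stream ends. A self-contained sketch of the pattern, with an illustrative command and RuntimeError in place of resource_management's Fail:

import subprocess
from datetime import datetime

command = ["hdfs", "balancer"]  # assumption: the real command is built from params
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=False,
                        close_fds=True, universal_newlines=True)
for line in iter(proc.stdout.readline, ''):
    print('[balancer] %s %s' % (datetime.now(), line.rstrip()))
proc.stdout.close()
proc.wait()
if proc.returncode is not None and proc.returncode != 0:
    raise RuntimeError('Hdfs rebalance process exited with error. See the log output')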

+ 1 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -24,10 +24,7 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-if System.get_instance().os_type == "oraclelinux":
-  ulimit_cmd = ''
-else:
-  ulimit_cmd = "ulimit -c unlimited; "
+ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']

+ 2 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py

@@ -31,7 +31,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     "ls {pid_file} >/dev/null 2>&1 &&"
     "ls {pid_file} >/dev/null 2>&1 &&"
     " ps `cat {pid_file}` >/dev/null 2>&1")
     " ps `cat {pid_file}` >/dev/null 2>&1")
   hadoop_daemon = format(
   hadoop_daemon = format(
-    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
     "{hadoop_bin}/hadoop-daemon.sh")
     "{hadoop_bin}/hadoop-daemon.sh")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
 
@@ -49,7 +49,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
       pid_file = format(
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
 
-  daemon_cmd = format("{cmd} {action} {name}")
+  daemon_cmd = format("{ulimit_cmd} su - {user} -c '{cmd} {action} {name}'")
 
 
   service_is_up = check_process if action == "start" else None
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
   #remove pid file from dead process
@@ -58,7 +58,6 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
        not_if=check_process,
        not_if=check_process,
   )
   )
   Execute(daemon_cmd,
   Execute(daemon_cmd,
-          user = user,
           not_if=service_is_up
           not_if=service_is_up
   )
   )
   if action == "stop":
   if action == "stop":

+ 22 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml

@@ -186,9 +186,28 @@
             <script>scripts/hcat_client.py</script>
             <scriptType>PYTHON</scriptType>
           </commandScript>
-          <downloads>
-            <source>/etc/hcatalog/conf</source>
-          </downloads>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
         </component>
       </components>
       <osSpecifics>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml

@@ -45,7 +45,7 @@
             </configFile>
             <configFile>
               <type>env</type>
-              <fileName>pig-properties.properties</fileName>
+              <fileName>pig.properties</fileName>
               <dictionaryName>pig-properties</dictionaryName>
             </configFile>                         
           </configFiles>          

+ 3 - 13
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py

@@ -40,16 +40,6 @@ pig_env_sh_template = config['configurations']['pig-env']['content']
 java64_home = config['hostLevelParams']['java_home']
 hadoop_home = "/usr"
 
-# pig.properties - if not in the JSON command, then we need to esnure some 
-# basic properties are set; this is a safety mechanism
-if (('pig-properties' in config['configurations']) and ('pig-content' in config['configurations']['pig-properties'])):
-  pig_properties = config['configurations']['pig-properties']['pig-content']
-else:
-  pig_properties = """hcat.bin=/usr/bin/hcat
-pig.location.check.strict=false"""
-
-# log4j.properties
-if (('pig-log4j' in config['configurations']) and ('content' in config['configurations']['pig-log4j'])):
-  log4j_props = config['configurations']['pig-log4j']['content']
-else:
-  log4j_props = None
+pig_properties = config['configurations']['pig-properties']['content']
+
+log4j_props = config['configurations']['pig-log4j']['content']
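
Note the behavior change here: the fallback defaults are gone, so a cluster whose command JSON lacks 'pig-properties' or 'pig-log4j' now fails fast with KeyError instead of silently writing built-in contents. Hypothetical illustration of both styles:

configurations = {'pig-properties': {'content': 'hcat.bin=/usr/bin/hcat'}}

pig_properties = configurations['pig-properties']['content']   # strict, as above
fallback = configurations.get('pig-log4j', {}).get('content')  # old defensive style -> None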

+ 0 - 12
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py

@@ -56,15 +56,3 @@ def pig():
       group=params.user_group,
       owner=params.hdfs_user
     )
-
-def pig_TemplateConfig(name):
-  import params
-
-  if not isinstance(name, list):
-    name = [name]
-
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-

+ 5 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml

@@ -45,5 +45,9 @@ export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
 export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
     </value>
   </property>
-  
+  <property>
+    <name>sqoop_user</name>
+    <description>User to run Sqoop as</description>
+    <value>sqoop</value>
+  </property>
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py

@@ -32,7 +32,7 @@ hbase_home = "/usr"
 hive_home = "/usr"
 hive_home = "/usr"
 zoo_conf_dir = "/etc/zookeeper"
 zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
 
 keytab_path = config['configurations']['hadoop-env']['keytab_path']
 keytab_path = config['configurations']['hadoop-env']['keytab_path']
 smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py

@@ -24,7 +24,7 @@ def sqoop(type=None):
   import params
   Link(params.sqoop_lib + "/mysql-connector-java.jar",
        to = '/usr/share/java/mysql-connector-java.jar'
-  )
+  ) 
   Directory(params.sqoop_conf_dir,
             owner = params.sqoop_user,
             group = params.user_group

+ 13 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py

@@ -401,8 +401,11 @@ class HDP206StackAdvisor(StackAdvisor):
       if validator is not None:
         siteName = validator[0]
         method = validator[1]
-        resultItems = method(getSiteProperties(configurations, siteName), recommendedDefaults[siteName]["properties"])
-        items.extend(resultItems)
+        if siteName in recommendedDefaults:
+          siteProperties = getSiteProperties(configurations, siteName)
+          if siteProperties is not None:
+            resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"])
+            items.extend(resultItems)
     return validations
     pass
 
@@ -421,6 +424,8 @@ class HDP206StackAdvisor(StackAdvisor):
     return result
 
   def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
+    if not propertyName in properties:
+      return "Value should be set"
     value = to_number(properties[propertyName])
     if value is None:
       return "Value should be integer"
@@ -432,6 +437,8 @@ class HDP206StackAdvisor(StackAdvisor):
     return None
 
   def validateXmxValue(self, properties, recommendedDefaults, propertyName):
+    if not propertyName in properties:
+      return "Value should be set"
     value = properties[propertyName]
     defaultValue = recommendedDefaults[propertyName]
     if defaultValue is None:
@@ -464,11 +471,10 @@ class HDP206StackAdvisor(StackAdvisor):
 
 # Validation helper methods
 def getSiteProperties(configurations, siteName):
-  if configurations[siteName] is None:
-    return {}
-  if configurations[siteName]["properties"] is None:
-    return {}
-  return configurations[siteName]["properties"]
+  siteConfig = configurations.get(siteName)
+  if siteConfig is None:
+    return None
+  return siteConfig.get("properties")
 
 def to_number(s):
   try:
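
The theme of this hunk is defensive lookups: dict.get() chains instead of direct indexing, and an explicit "should be set" result when a property is missing. A minimal sketch of the same pattern (names snake_cased, to_number simplified to int(), and the "less than" message paraphrased):

def get_site_properties(configurations, site_name):
    site_config = configurations.get(site_name)
    if site_config is None:
        return None
    return site_config.get("properties")

def validator_less_then_default_value(properties, recommended_defaults, name):
    if name not in properties:
        return "Value should be set"
    try:
        value = int(properties[name])
    except ValueError:
        return "Value should be integer"
    if value < int(recommended_defaults[name]):
        return "Value is less than the recommended default"
    return None

props = get_site_properties({"yarn-site": {"properties": {"x": "1"}}}, "yarn-site")
print(validator_less_then_default_value(props, {"x": "2"}, "x"))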

+ 22 - 3
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/metainfo.xml

@@ -44,9 +44,28 @@
             <script>scripts/hcat_client.py</script>
             <script>scripts/hcat_client.py</script>
             <scriptType>PYTHON</scriptType>
           </commandScript>
-          <downloads>
-            <source>/etc/hive-hcatalog/conf</source>
-          </downloads>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
         </component>
       </components>
       <osSpecifics>

+ 7 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java

@@ -28,6 +28,8 @@ import org.apache.ambari.server.api.services.persistence.PersistenceManager;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.view.ViewRegistry;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.util.*;
@@ -40,6 +42,11 @@ import static org.junit.Assert.*;
  */
 public class CreateHandlerTest {
 
+  @Before
+  public void before() {
+    ViewRegistry.initInstance(new ViewRegistry());
+  }
+
   @Test
   public void testHandleRequest__Synchronous_NoPropsInBody() throws Exception {
     Request request = createNiceMock(Request.class);

+ 7 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java

@@ -28,6 +28,8 @@ import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.view.ViewRegistry;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.util.*;
@@ -41,6 +43,11 @@ import static org.junit.Assert.assertEquals;
  */
 public class DeleteHandlerTest {
 
+  @Before
+  public void before() {
+    ViewRegistry.initInstance(new ViewRegistry());
+  }
+
   @Test
   public void testHandleRequest__Synchronous_NoPropsInBody() throws Exception {
     Request request = createMock(Request.class);

+ 7 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java

@@ -28,6 +28,8 @@ import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.view.ViewRegistry;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.util.*;
@@ -40,6 +42,11 @@ import static org.junit.Assert.*;
  */
 public class UpdateHandlerTest {
 
+  @Before
+  public void before() {
+    ViewRegistry.initInstance(new ViewRegistry());
+  }
+
   @Test
   public void testHandleRequest__Synchronous_NoPropsInBody() throws Exception {
     Request request = createMock(Request.class);

+ 7 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java

@@ -42,7 +42,9 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.view.ViewRegistry;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.util.List;
@@ -54,6 +56,11 @@ import java.util.Set;
  */
 public class BaseResourceDefinitionTest {
 
+  @Before
+  public void before() {
+    ViewRegistry.initInstance(new ViewRegistry());
+  }
+
   @Test
   public void testGetPostProcessors() throws AmbariException {
     BaseResourceDefinition resourceDefinition = getResourceDefinition();

+ 14 - 6
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProviderTest.java

@@ -19,6 +19,7 @@
 package org.apache.ambari.server.controller.internal;
 
 import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;
@@ -35,17 +36,20 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.ivory.Cluster;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.GroupDAO;
+import org.apache.ambari.server.orm.dao.MemberDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.ResourceDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
+import org.apache.ambari.server.orm.dao.ViewDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.GroupEntity;
 import org.apache.ambari.server.orm.entities.PermissionEntity;
@@ -57,7 +61,9 @@ import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
+import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.view.ViewRegistry;
+import org.apache.ambari.server.view.ViewRegistryTest;
 import org.easymock.EasyMock;
 import org.junit.Assert;
 import org.junit.Before;
@@ -75,6 +81,11 @@ public class AmbariPrivilegeResourceProviderTest {
   private final static PrincipalDAO principalDAO = createStrictMock(PrincipalDAO.class);
   private final static PermissionDAO permissionDAO = createStrictMock(PermissionDAO.class);
   private final static ResourceDAO resourceDAO = createStrictMock(ResourceDAO.class);
+  private static final ViewDAO viewDAO = createMock(ViewDAO.class);
+  private static final ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
+  private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
+  private static final ResourceTypeDAO resourceTypeDAO = createNiceMock(ResourceTypeDAO.class);
+  private static final SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
 
   @BeforeClass
   public static void initClass() {
@@ -84,6 +95,8 @@ public class AmbariPrivilegeResourceProviderTest {
 
   @Before
   public void resetGlobalMocks() {
+    ViewRegistry.initInstance(ViewRegistryTest.getRegistry(viewDAO, viewInstanceDAO, userDAO,
+        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper));
     reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO, clusterDAO);
   }
 
@@ -220,11 +233,6 @@ public class AmbariPrivilegeResourceProviderTest {
     expect(clusterEntity.getResource()).andReturn(clusterResourceEntity).anyTimes();
     expect(clusterEntity.getClusterName()).andReturn("cluster1").anyTimes();
 
-    List<PrincipalEntity> principalEntities = new LinkedList<PrincipalEntity>();
-    principalEntities.add(ambariPrincipalEntity);
-    principalEntities.add(viewPrincipalEntity);
-    principalEntities.add(clusterPrincipalEntity);
-
     List<UserEntity> userEntities = new LinkedList<UserEntity>();
     userEntities.add(ambariUserEntity);
     userEntities.add(viewUserEntity);

+ 14 - 7
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProviderTest.java

@@ -21,11 +21,15 @@ package org.apache.ambari.server.controller.internal;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.dao.GroupDAO;
+import org.apache.ambari.server.orm.dao.MemberDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.ResourceDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
+import org.apache.ambari.server.orm.dao.ViewDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.GroupEntity;
 import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.orm.entities.PrincipalEntity;
@@ -37,9 +41,9 @@ import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewEntityTest;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntityTest;
+import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.view.ViewRegistry;
 import org.apache.ambari.server.view.ViewRegistryTest;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -50,6 +54,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
+import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.expect;
@@ -67,6 +72,11 @@ public class ViewPrivilegeResourceProviderTest {
   private final static PrincipalDAO principalDAO = createStrictMock(PrincipalDAO.class);
   private final static PermissionDAO permissionDAO = createStrictMock(PermissionDAO.class);
   private final static ResourceDAO resourceDAO = createStrictMock(ResourceDAO.class);
+  private static final ViewDAO viewDAO = createMock(ViewDAO.class);
+  private static final ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
+  private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
+  private static final ResourceTypeDAO resourceTypeDAO = createNiceMock(ResourceTypeDAO.class);
+  private static final SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
 
   @BeforeClass
   public static void initClass() {
@@ -75,13 +85,10 @@ public class ViewPrivilegeResourceProviderTest {
 
   @Before
   public void resetGlobalMocks() {
-    ViewRegistryTest.clear();
-    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO);
-  }
 
-  @AfterClass
-  public static void afterClass() {
-    ViewRegistryTest.clear();
+    ViewRegistry.initInstance(ViewRegistryTest.getRegistry(viewDAO, viewInstanceDAO, userDAO,
+        memberDAO, privilegeDAO, resourceDAO, resourceTypeDAO, securityHelper));
+    reset(privilegeDAO, userDAO, groupDAO, principalDAO, permissionDAO, resourceDAO);
   }
 
   @Test

+ 40 - 13
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java

@@ -63,6 +63,7 @@ import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.KeyValueDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrincipalDAO;
 import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
 import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
@@ -74,6 +75,9 @@ import org.apache.ambari.server.orm.dao.ViewDAO;
 import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
+import org.apache.ambari.server.orm.entities.KeyValueEntity;
+import org.apache.ambari.server.orm.entities.PrivilegeEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
@@ -231,6 +235,7 @@ public class UpgradeCatalog170Test {
     ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
     ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
     PermissionDAO permissionDAO = createNiceMock(PermissionDAO.class);
     PermissionDAO permissionDAO = createNiceMock(PermissionDAO.class);
     PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
     PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
+    KeyValueDAO keyValueDAO = createNiceMock(KeyValueDAO.class);
 
 
     EntityTransaction trans = createNiceMock(EntityTransaction.class);
     EntityTransaction trans = createNiceMock(EntityTransaction.class);
     CriteriaBuilder cb = createNiceMock(CriteriaBuilder.class);
     CriteriaBuilder cb = createNiceMock(CriteriaBuilder.class);
@@ -316,18 +321,19 @@ public class UpgradeCatalog170Test {
     expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "content")).andReturn(envDicts).once();
     expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "content")).andReturn(envDicts).once();
     expect(configHelper.getPropertyValueFromStackDefenitions(cluster, "hadoop-env", "content")).andReturn("env file contents").once();
     expect(configHelper.getPropertyValueFromStackDefenitions(cluster, "hadoop-env", "content")).andReturn("env file contents").once();
 
 
-    expect(injector.getInstance(UserDAO.class)).andReturn(userDAO).once();
-    expect(injector.getInstance(PrincipalDAO.class)).andReturn(principalDAO).once();
-    expect(injector.getInstance(PrincipalTypeDAO.class)).andReturn(principalTypeDAO).once();
-    expect(injector.getInstance(ClusterDAO.class)).andReturn(clusterDAO).once();
-    expect(injector.getInstance(ResourceTypeDAO.class)).andReturn(resourceTypeDAO).once();
-    expect(injector.getInstance(ResourceDAO.class)).andReturn(resourceDAO).once();
-    expect(injector.getInstance(ViewDAO.class)).andReturn(viewDAO).once();
-    expect(injector.getInstance(ViewInstanceDAO.class)).andReturn(viewInstanceDAO).once();
-    expect(injector.getInstance(PermissionDAO.class)).andReturn(permissionDAO).once();
-    expect(injector.getInstance(PrivilegeDAO.class)).andReturn(privilegeDAO).once();
-
-    expect(userDAO.findAll()).andReturn(Collections.<UserEntity> emptyList()).anyTimes();
+    expect(injector.getInstance(UserDAO.class)).andReturn(userDAO).anyTimes();
+    expect(injector.getInstance(PrincipalDAO.class)).andReturn(principalDAO).anyTimes();
+    expect(injector.getInstance(PrincipalTypeDAO.class)).andReturn(principalTypeDAO).anyTimes();
+    expect(injector.getInstance(ClusterDAO.class)).andReturn(clusterDAO).anyTimes();
+    expect(injector.getInstance(ResourceTypeDAO.class)).andReturn(resourceTypeDAO).anyTimes();
+    expect(injector.getInstance(ResourceDAO.class)).andReturn(resourceDAO).anyTimes();
+    expect(injector.getInstance(ViewDAO.class)).andReturn(viewDAO).anyTimes();
+    expect(injector.getInstance(ViewInstanceDAO.class)).andReturn(viewInstanceDAO).anyTimes();
+    expect(injector.getInstance(PermissionDAO.class)).andReturn(permissionDAO).anyTimes();
+    expect(injector.getInstance(PrivilegeDAO.class)).andReturn(privilegeDAO).anyTimes();
+    expect(injector.getInstance(KeyValueDAO.class)).andReturn(keyValueDAO).anyTimes();
+
+    expect(userDAO.findAll()).andReturn(Collections.<UserEntity> emptyList()).times(2);
     expect(clusterDAO.findAll()).andReturn(Collections.<ClusterEntity> emptyList()).anyTimes();
     expect(clusterDAO.findAll()).andReturn(Collections.<ClusterEntity> emptyList()).anyTimes();
     expect(viewDAO.findAll()).andReturn(Collections.<ViewEntity> emptyList()).anyTimes();
     expect(viewDAO.findAll()).andReturn(Collections.<ViewEntity> emptyList()).anyTimes();
     expect(viewInstanceDAO.findAll()).andReturn(Collections.<ViewInstanceEntity> emptyList()).anyTimes();
     expect(viewInstanceDAO.findAll()).andReturn(Collections.<ViewInstanceEntity> emptyList()).anyTimes();
@@ -338,9 +344,29 @@ public class UpgradeCatalog170Test {
     expect(cluster.getDesiredConfigByType("pig-properties")).andReturn(pigConfig).anyTimes();
     expect(cluster.getDesiredConfigByType("pig-properties")).andReturn(pigConfig).anyTimes();
     expect(pigConfig.getProperties()).andReturn(pigSettings).anyTimes();
     expect(pigConfig.getProperties()).andReturn(pigSettings).anyTimes();
 
 
+    ViewEntity jobsView = createNiceMock(ViewEntity.class);
+    KeyValueEntity showJobsKeyValue = createNiceMock(KeyValueEntity.class);
+    UserEntity user = createNiceMock(UserEntity.class);
+
+    expect(userDAO.findAll()).andReturn(Collections.singletonList(user));
+    expect(jobsView.getCommonName()).andReturn(UpgradeCatalog170.JOBS_VIEW_NAME);
+    expect(jobsView.getVersion()).andReturn("1.0.0");
+    expect(viewDAO.findByCommonName(UpgradeCatalog170.JOBS_VIEW_NAME)).andReturn(jobsView).once();
+    expect(showJobsKeyValue.getValue()).andReturn("true");
+    expect(keyValueDAO.findByKey(UpgradeCatalog170.SHOW_JOBS_FOR_NON_ADMIN_KEY)).andReturn(showJobsKeyValue);
+    expect(privilegeDAO.findAllByPrincipal(anyObject(List.class))).andReturn(Collections.<PrivilegeEntity>emptyList());
+    expect(viewDAO.merge(jobsView)).andReturn(jobsView);
+
+    resourceDAO.create(anyObject(ResourceEntity.class));
+    viewInstanceDAO.create(anyObject(ViewInstanceEntity.class));
+    keyValueDAO.remove(showJobsKeyValue);
+    privilegeDAO.create(anyObject(PrivilegeEntity.class));
+
     replay(entityManager, trans, upgradeCatalog, cb, cq, hrc, q);
     replay(dbAccessor, configuration, injector, cluster, clusters, amc, config, configHelper, pigConfig);
     replay(userDAO, clusterDAO, viewDAO, viewInstanceDAO, permissionDAO);
+    replay(resourceTypeDAO, resourceDAO, keyValueDAO, privilegeDAO);
+    replay(jobsView, showJobsKeyValue, user);

     Class<?> c = AbstractUpgradeCatalog.class;
     Field f = c.getDeclaredField("configuration");
@@ -355,7 +381,8 @@ public class UpgradeCatalog170Test {

     upgradeCatalog.executeDMLUpdates();

-    verify(upgradeCatalog, dbAccessor, configuration, injector, cluster, clusters, amc, config, configHelper);
+    verify(upgradeCatalog, dbAccessor, configuration, injector, cluster, clusters, amc, config, configHelper,
+        jobsView, showJobsKeyValue, privilegeDAO, viewDAO, viewInstanceDAO, resourceDAO, keyValueDAO);
   }



+ 95 - 147
ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java

@@ -22,6 +22,7 @@ import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;

 import java.io.File;
@@ -78,9 +79,7 @@ import org.apache.ambari.server.view.events.EventImplTest;
 import org.apache.ambari.view.events.Event;
 import org.apache.ambari.view.events.Listener;
 import org.easymock.EasyMock;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.springframework.security.core.GrantedAuthority;

@@ -149,8 +148,21 @@ public class ViewRegistryTest {
       "    </instance>\n" +
       "    </instance>\n" +
       "</view>";
       "</view>";
 
 
+  // registry mocks
+  private static final ViewDAO viewDAO = createMock(ViewDAO.class);
+  private static final ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
+  private static final UserDAO userDAO = createNiceMock(UserDAO.class);
+  private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
+  private static final PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
+  private static final ResourceDAO resourceDAO = createNiceMock(ResourceDAO.class);
+  private static final ResourceTypeDAO resourceTypeDAO = createNiceMock(ResourceTypeDAO.class);
+  private static final SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
+  private static final Configuration configuration = createNiceMock(Configuration.class);
+
   @Test
   public void testReadViewArchives() throws Exception {
+    ViewRegistry registry = getRegistry();
+
     Configuration configuration = createNiceMock(Configuration.class);
     File viewDir = createNiceMock(File.class);
     File extractedArchiveDir = createNiceMock(File.class);
@@ -171,14 +183,6 @@ public class ViewRegistryTest {
     resourceTypeEntity.setId(10);
     resourceTypeEntity.setName("MY_VIEW{1.0.0}");

-    ViewDAO vDAO = createMock(ViewDAO.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ViewInstanceDAO viDAO = createNiceMock(ViewInstanceDAO.class);
-
-    ViewRegistry.setViewDAO(vDAO);
-    ViewRegistry.setResourceDAO(rDAO);
-    ViewRegistry.setInstanceDAO(viDAO);
-
     ViewEntity viewDefinition = ViewEntityTest.getViewEntity();
     viewDefinition.setResourceType(resourceTypeEntity);

@@ -255,17 +259,14 @@ public class ViewRegistryTest {
     expect(libDir.listFiles()).andReturn(new File[]{fileEntry});
     expect(fileEntry.toURI()).andReturn(new URI("file:./"));

-    expect(vDAO.findByName("MY_VIEW{1.0.0}")).andReturn(viewDefinition);
-
-    expect(vDAO.findAll()).andReturn(Collections.<ViewEntity>emptyList());
+    expect(viewDAO.findByName("MY_VIEW{1.0.0}")).andReturn(viewDefinition);
-    expect(viDAO.merge(EasyMock.anyObject(ViewInstanceEntity.class))).andReturn(null).times(2);
+    expect(viewDAO.findAll()).andReturn(Collections.<ViewEntity>emptyList());

     // replay mocks
     replay(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
-        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, rDAO, vDAO, viDAO);
+        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, resourceDAO, viewDAO, viewInstanceDAO);
-    ViewRegistry registry = ViewRegistry.getInstance();
     registry.setHelper(new TestViewRegistryHelper(viewConfigs, files, outputStreams, jarFiles));

     Set<ViewInstanceEntity> instanceEntities = registry.readViewArchives(configuration);
@@ -274,11 +275,13 @@ public class ViewRegistryTest {

     // verify mocks
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
-        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, rDAO, vDAO, viDAO);
+        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, resourceDAO, viewDAO, viewInstanceDAO);
   }

   @Test
   public void testReadViewArchives_exception() throws Exception {
+    ViewRegistry registry = getRegistry();
+
     Configuration configuration = createNiceMock(Configuration.class);
     File viewDir = createNiceMock(File.class);
     File extractedArchiveDir = createNiceMock(File.class);
@@ -299,10 +302,6 @@ public class ViewRegistryTest {
     resourceTypeEntity.setId(10);
     resourceTypeEntity.setName("MY_VIEW{1.0.0}");

-    ViewDAO vDAO = createMock(ViewDAO.class);
-
-    ViewRegistry.setViewDAO(vDAO);
-
     ViewEntity viewDefinition = ViewEntityTest.getViewEntity();
     viewDefinition.setResourceType(resourceTypeEntity);

@@ -379,14 +378,13 @@ public class ViewRegistryTest {
     expect(libDir.listFiles()).andReturn(new File[]{fileEntry});
     expect(fileEntry.toURI()).andReturn(new URI("file:./"));

-    expect(vDAO.findAll()).andReturn(Collections.<ViewEntity>emptyList());
-    expect(vDAO.findByName("MY_VIEW{1.0.0}")).andReturn(viewDefinition);
+    expect(viewDAO.findAll()).andReturn(Collections.<ViewEntity>emptyList());
+    expect(viewDAO.findByName("MY_VIEW{1.0.0}")).andThrow(new IllegalArgumentException("Expected exception."));

     // replay mocks
     replay(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
-        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, vDAO);
+        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, viewDAO);
-    ViewRegistry registry = ViewRegistry.getInstance();
     registry.setHelper(new TestViewRegistryHelper(viewConfigs, files, outputStreams, jarFiles));

     Set<ViewInstanceEntity> instanceEntities = registry.readViewArchives(configuration);
@@ -395,12 +393,12 @@ public class ViewRegistryTest {

     // verify mocks
     verify(configuration, viewDir, extractedArchiveDir, viewArchive, archiveDir, entryFile, classesDir,
-        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, vDAO);
+        libDir, fileEntry, viewJarFile, enumeration, jarEntry, is, fos, viewDAO);
   }

   @Test
   public void testListener() throws Exception {
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     TestListener listener = new TestListener();
     registry.registerListener(listener, "MY_VIEW", "1.0.0");
@@ -424,7 +422,7 @@ public class ViewRegistryTest {
   public void testAddGetDefinitions() throws Exception {
     ViewEntity viewDefinition = ViewEntityTest.getViewEntity();

-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     registry.addDefinition(viewDefinition);

@@ -442,7 +440,7 @@ public class ViewRegistryTest {
     ViewEntity viewDefinition = ViewEntityTest.getViewEntity();
     ViewInstanceEntity viewInstanceDefinition = ViewInstanceEntityTest.getViewInstanceEntity();

-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     registry.addDefinition(viewDefinition);

@@ -460,7 +458,7 @@ public class ViewRegistryTest {
   @Test
   public void testGetSubResourceDefinitions() throws Exception {
     ViewEntity viewDefinition = ViewEntityTest.getViewEntity();
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     ResourceConfig config = ResourceConfigTest.getResourceConfigs().get(0);
     Resource.Type type1 = new Resource.Type("myType");
@@ -478,7 +476,7 @@ public class ViewRegistryTest {

   @Test
   public void testAddInstanceDefinition() throws Exception {
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     ViewEntity viewEntity = ViewEntityTest.getViewEntity();
     InstanceConfig instanceConfig = InstanceConfigTest.getInstanceConfigs().get(0);
@@ -509,18 +507,7 @@ public class ViewRegistryTest {
   @Test
   public void testInstallViewInstance() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     Properties properties = new Properties();
     properties.put("p1", "v1");
@@ -551,18 +538,7 @@ public class ViewRegistryTest {
   @Test
   public void testInstallViewInstance_invalid() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     Properties properties = new Properties();
     properties.put("p1", "v1");
@@ -573,7 +549,7 @@ public class ViewRegistryTest {
     ViewEntity viewEntity = getViewEntity(config, ambariConfig, getClass().getClassLoader(), "");
     ViewInstanceEntity viewInstanceEntity = getViewInstanceEntity(viewEntity, config.getInstances().get(0));

-    replay(viewDAO, viewInstanceDAO, securityHelper, rtDAO);
+    replay(viewDAO, viewInstanceDAO, securityHelper, resourceTypeDAO);

     registry.addDefinition(viewEntity);
     try {
@@ -582,24 +558,13 @@ public class ViewRegistryTest {
     } catch (IllegalStateException e) {
       // expected
     }
-    verify(viewDAO, viewInstanceDAO, securityHelper);
+    verify(viewDAO, viewInstanceDAO, securityHelper, resourceTypeDAO);
   }

   @Test
   public void testInstallViewInstance_unknownView() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     Properties properties = new Properties();
     properties.put("p1", "v1");
@@ -611,7 +576,7 @@ public class ViewRegistryTest {
     ViewInstanceEntity viewInstanceEntity = getViewInstanceEntity(viewEntity, config.getInstances().get(0));
     viewInstanceEntity.setViewName("BOGUS_VIEW");

-    replay(viewDAO, viewInstanceDAO, securityHelper, rtDAO);
+    replay(viewDAO, viewInstanceDAO, securityHelper, resourceTypeDAO);

     registry.addDefinition(viewEntity);
     try {
@@ -620,24 +585,13 @@ public class ViewRegistryTest {
     } catch (IllegalArgumentException e) {
       // expected
     }
-    verify(viewDAO, viewInstanceDAO, securityHelper);
+    verify(viewDAO, viewInstanceDAO, securityHelper, resourceTypeDAO);
   }

   @Test
   public void testUpdateViewInstance() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     Properties properties = new Properties();
     properties.put("p1", "v1");
@@ -649,7 +603,6 @@ public class ViewRegistryTest {
     ViewInstanceEntity viewInstanceEntity = getViewInstanceEntity(viewEntity, config.getInstances().get(0));
     ViewInstanceEntity updateInstance = getViewInstanceEntity(viewEntity, config.getInstances().get(0));

-    expect(viewInstanceDAO.merge(viewInstanceEntity)).andReturn(null);
     expect(viewInstanceDAO.merge(viewInstanceEntity)).andReturn(viewInstanceEntity);
     expect(viewInstanceDAO.findByName("MY_VIEW{1.0.0}", viewInstanceEntity.getInstanceName())).andReturn(viewInstanceEntity);

@@ -672,18 +625,7 @@ public class ViewRegistryTest {
   @Test
   public void testUpdateViewInstance_invalid() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     Properties properties = new Properties();
     properties.put("p1", "v1");
@@ -716,18 +658,7 @@ public class ViewRegistryTest {
   @Test
   public void testRemoveInstanceData() throws Exception {

-    ViewDAO viewDAO = createNiceMock(ViewDAO.class);
-    ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
-    UserDAO userDAO = createNiceMock(UserDAO.class);
-    MemberDAO memberDAO = createNiceMock(MemberDAO.class);
-    PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-    ResourceDAO rDAO = createNiceMock(ResourceDAO.class);
-    ResourceTypeDAO rtDAO = createNiceMock(ResourceTypeDAO.class);
-
-    ViewRegistry.init(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, securityHelper, rDAO, rtDAO);
-
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     ViewInstanceEntity viewInstanceEntity = ViewInstanceEntityTest.getViewInstanceEntity();

@@ -747,15 +678,12 @@ public class ViewRegistryTest {

   @Test
   public void testIncludeDefinitionForAdmin() {
-    ViewRegistry viewRegistry = ViewRegistry.getInstance();
+    ViewRegistry viewRegistry = getRegistry();
     ViewEntity viewEntity = createNiceMock(ViewEntity.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
     AmbariGrantedAuthority adminAuthority = createNiceMock(AmbariGrantedAuthority.class);
     PrivilegeEntity privilegeEntity = createNiceMock(PrivilegeEntity.class);
     PermissionEntity permissionEntity = createNiceMock(PermissionEntity.class);

-    viewRegistry.setSecurityHelper(securityHelper);
-
     Collection<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>();
     authorities.add(adminAuthority);

@@ -764,20 +692,19 @@ public class ViewRegistryTest {
     expect(adminAuthority.getPrivilegeEntity()).andReturn(privilegeEntity);
     expect(privilegeEntity.getPermission()).andReturn(permissionEntity);
     expect(permissionEntity.getId()).andReturn(PermissionEntity.AMBARI_ADMIN_PERMISSION);
-    replay(securityHelper, adminAuthority, privilegeEntity, permissionEntity);
+
+    expect(configuration.getApiAuthentication()).andReturn(true);
+    replay(securityHelper, adminAuthority, privilegeEntity, permissionEntity, configuration);

     Assert.assertTrue(viewRegistry.includeDefinition(viewEntity));

-    verify(securityHelper, adminAuthority, privilegeEntity, permissionEntity);
+    verify(securityHelper, adminAuthority, privilegeEntity, permissionEntity, configuration);
   }

   @Test
   public void testIncludeDefinitionForUserNoInstances() {
-    ViewRegistry viewRegistry = ViewRegistry.getInstance();
+    ViewRegistry viewRegistry = getRegistry();
     ViewEntity viewEntity = createNiceMock(ViewEntity.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
-
-    viewRegistry.setSecurityHelper(securityHelper);

     Collection<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>();

@@ -786,26 +713,25 @@ public class ViewRegistryTest {
     securityHelper.getCurrentAuthorities();
     EasyMock.expectLastCall().andReturn(authorities);
     expect(viewEntity.getInstances()).andReturn(instances);
-    replay(securityHelper, viewEntity);
+
+    expect(configuration.getApiAuthentication()).andReturn(true);
+    replay(securityHelper, viewEntity, configuration);

     Assert.assertFalse(viewRegistry.includeDefinition(viewEntity));

-    verify(securityHelper, viewEntity);
+    verify(securityHelper, viewEntity, configuration);
   }

   @Test
   public void testIncludeDefinitionForUserHasAccess() {
-    ViewRegistry viewRegistry = ViewRegistry.getInstance();
+    ViewRegistry viewRegistry = getRegistry();
     ViewEntity viewEntity = createNiceMock(ViewEntity.class);
-    SecurityHelper securityHelper = createNiceMock(SecurityHelper.class);
     ViewInstanceEntity instanceEntity = createNiceMock(ViewInstanceEntity.class);
     ResourceEntity resourceEntity = createNiceMock(ResourceEntity.class);
     AmbariGrantedAuthority viewUseAuthority = createNiceMock(AmbariGrantedAuthority.class);
     PrivilegeEntity privilegeEntity = createNiceMock(PrivilegeEntity.class);
     PermissionEntity permissionEntity = createNiceMock(PermissionEntity.class);

-    viewRegistry.setSecurityHelper(securityHelper);
-
     Collection<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>();
     authorities.add(viewUseAuthority);

@@ -820,34 +746,25 @@ public class ViewRegistryTest {
     expect(permissionEntity.getId()).andReturn(PermissionEntity.VIEW_USE_PERMISSION).anyTimes();
     securityHelper.getCurrentAuthorities();
     EasyMock.expectLastCall().andReturn(authorities).anyTimes();
-    replay(securityHelper, viewEntity, instanceEntity, viewUseAuthority, privilegeEntity, permissionEntity);
+    expect(configuration.getApiAuthentication()).andReturn(true);
+    replay(securityHelper, viewEntity, instanceEntity, viewUseAuthority, privilegeEntity, permissionEntity, configuration);

     Assert.assertTrue(viewRegistry.includeDefinition(viewEntity));

-    verify(securityHelper, viewEntity, instanceEntity, viewUseAuthority, privilegeEntity, permissionEntity);
+    verify(securityHelper, viewEntity, instanceEntity, viewUseAuthority, privilegeEntity, permissionEntity, configuration);
   }

-  @Before
-  public void before() throws Exception {
-    clear();
-  }
+  @Test
+  public void testIncludeDefinitionForNoApiAuthentication() {
+    ViewRegistry viewRegistry = getRegistry();
+    ViewEntity viewEntity = createNiceMock(ViewEntity.class);
-  @AfterClass
-  public static void afterClass() {
-    clear();
-  }
+    expect(configuration.getApiAuthentication()).andReturn(false);
+    replay(securityHelper, viewEntity, configuration);
-  public static void clear() {
-    ViewRegistry.getInstance().clear();
-
-    ViewRegistry.setInstanceDAO(null);
-    ViewRegistry.setMemberDAO(null);
-    ViewRegistry.setPrivilegeDAO(null);
-    ViewRegistry.setResourceDAO(null);
-    ViewRegistry.setResourceTypeDAO(null);
-    ViewRegistry.setSecurityHelper(null);
-    ViewRegistry.setUserDAO(null);
-    ViewRegistry.setViewDAO(null);
+    Assert.assertTrue(viewRegistry.includeDefinition(viewEntity));
+
+    verify(securityHelper, viewEntity, configuration);
   }

   public class TestViewRegistryHelper extends ViewRegistry.ViewRegistryHelper {
@@ -912,15 +829,46 @@ public class ViewRegistryTest {
     }
   }

+  private static ViewRegistry getRegistry() {
+    ViewRegistry instance = getRegistry(viewDAO, viewInstanceDAO,
+        userDAO, memberDAO, privilegeDAO,
+        resourceDAO, resourceTypeDAO, securityHelper);
+
+    reset(viewDAO, resourceDAO, viewInstanceDAO, userDAO, memberDAO,
+        privilegeDAO, resourceTypeDAO, securityHelper, configuration);
+
+    return instance;
+  }
+
+  public static ViewRegistry getRegistry(ViewDAO viewDAO, ViewInstanceDAO viewInstanceDAO,
+                                  UserDAO userDAO, MemberDAO memberDAO,
+                                  PrivilegeDAO privilegeDAO, ResourceDAO resourceDAO,
+                                  ResourceTypeDAO resourceTypeDAO, SecurityHelper securityHelper ) {
+
+    ViewRegistry instance = new ViewRegistry();
+
+    instance.viewDAO = viewDAO;
+    instance.resourceDAO = resourceDAO;
+    instance.instanceDAO = viewInstanceDAO;
+    instance.userDAO = userDAO;
+    instance.memberDAO = memberDAO;
+    instance.privilegeDAO = privilegeDAO;
+    instance.resourceTypeDAO = resourceTypeDAO;
+    instance.securityHelper = securityHelper;
+    instance.configuration = configuration;
+
+    return instance;
+  }
+
   public static ViewEntity getViewEntity(ViewConfig viewConfig, Configuration ambariConfig,
                                      ClassLoader cl, String archivePath) throws Exception{
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     return registry.createViewDefinition(viewConfig, ambariConfig, cl, archivePath);
   }

   public static ViewInstanceEntity getViewInstanceEntity(ViewEntity viewDefinition, InstanceConfig instanceConfig) throws Exception {
-    ViewRegistry registry = ViewRegistry.getInstance();
+    ViewRegistry registry = getRegistry();

     ViewInstanceEntity viewInstanceDefinition =
         new ViewInstanceEntity(viewDefinition, instanceConfig);

+ 73 - 13
ambari-server/src/test/python/TestAmbariServer.py

@@ -31,11 +31,15 @@ import platform
 import shutil
 from pwd import getpwnam
 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
+ 
+# We have to use this import HACK because the filename contains a dash
 from ambari_commons import Firewall, OSCheck, OSConst, FirewallChecks

 with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
-  # We have to use this import HACK because the filename contains a dash
-  ambari_server = __import__('ambari-server')
+  with patch("os.symlink"):
+    with patch("__builtin__.open"):
+      with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']):
+        ambari_server = __import__('ambari-server')

 FatalException = ambari_server.FatalException
 NonFatalException = ambari_server.NonFatalException
@@ -1005,54 +1009,67 @@ class TestAmbariServer(TestCase):
     ambari_server.store_password_file("password", "passfile")
     self.assertTrue(set_file_permissions_mock.called)

-
-  @patch.object(FirewallChecks, "run_os_command")
+  @patch("subprocess.Popen")
   @patch.object(OSCheck, "get_os_family")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_major_version")
-  def test_check_iptables_is_running(self, get_os_major_version_mock, get_os_type_mock, get_os_family_mock, run_os_command_mock):
+  def test_check_iptables_is_running(self, get_os_major_version_mock, get_os_type_mock, get_os_family_mock, popen_mock):

     get_os_major_version_mock.return_value = 18
     get_os_type_mock.return_value = OSConst.OS_FEDORA
     get_os_family_mock.return_value = OSConst.REDHAT_FAMILY

     firewall_obj = Firewall().getFirewallObject()
-    run_os_command_mock.return_value = 0, "active", ""
+    p = MagicMock()
+    p.communicate.return_value = ("active", "err")
+    p.returncode = 0
+    popen_mock.return_value = p
     self.assertEqual("Fedora18FirewallChecks", firewall_obj.__class__.__name__)
     self.assertTrue(firewall_obj.check_iptables())
-    run_os_command_mock.return_value = 3, "", ""
+    p.communicate.return_value = ("", "err")
+    p.returncode = 3
     self.assertFalse(firewall_obj.check_iptables())
+    self.assertEqual("err", firewall_obj.stderrdata)


     get_os_type_mock.return_value = OSConst.OS_UBUNTU
     get_os_family_mock.return_value = OSConst.DEBIAN_FAMILY

     firewall_obj = Firewall().getFirewallObject()
-    run_os_command_mock.return_value = 0, "Status: active", ""
+    p.communicate.return_value = ("Status: active", "err")
+    p.returncode = 0
     self.assertEqual("UbuntuFirewallChecks", firewall_obj.__class__.__name__)
     self.assertTrue(firewall_obj.check_iptables())
-    run_os_command_mock.return_value = 0, "Status: inactive", ""
+    p.communicate.return_value = ("Status: inactive", "err")
+    p.returncode = 0
     self.assertFalse(firewall_obj.check_iptables())
+    self.assertEqual("err", firewall_obj.stderrdata)

     get_os_type_mock.return_value = ""
     get_os_family_mock.return_value = OSConst.SUSE_FAMILY

     firewall_obj = Firewall().getFirewallObject()
-    run_os_command_mock.return_value = 0, "### iptables", ""
+    p.communicate.return_value = ("### iptables", "err")
+    p.returncode = 0
     self.assertEqual("SuseFirewallChecks", firewall_obj.__class__.__name__)
     self.assertTrue(firewall_obj.check_iptables())
-    run_os_command_mock.return_value = 0, "SuSEfirewall2 not active", ""
+    p.communicate.return_value = ("SuSEfirewall2 not active", "err")
+    p.returncode = 0
     self.assertFalse(firewall_obj.check_iptables())
+    self.assertEqual("err", firewall_obj.stderrdata)

     get_os_type_mock.return_value = ""
     get_os_family_mock.return_value = OSConst.REDHAT_FAMILY

     firewall_obj = Firewall().getFirewallObject()
-    run_os_command_mock.return_value = 0, "Table: filter", ""
+    p.communicate.return_value = ("Table: filter", "err")
+    p.returncode = 0
     self.assertEqual("FirewallChecks", firewall_obj.__class__.__name__)
     self.assertTrue(firewall_obj.check_iptables())
-    run_os_command_mock.return_value = 3, "", ""
+    p.communicate.return_value = ("", "err")
+    p.returncode = 3
     self.assertFalse(firewall_obj.check_iptables())
+    self.assertEqual("err", firewall_obj.stderrdata)


   def test_dlprogress(self):
@@ -5094,3 +5111,46 @@ MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV

     self.assertTrue(perform_housekeeping_mock.called)

+  @patch.object(ambari_server, "run_os_command")
+  @patch.object(ambari_server, "print_error_msg")
+  def test_change_objects_owner_both(self,
+                                     print_error_msg_mock,
+                                     run_os_command_mock):
+    args = MagicMock()
+    stdout = " stdout "
+    stderr = " stderr "
+    run_os_command_mock.return_value = 1, stdout, stderr
+
+    ambari_server.VERBOSE = True
+    self.assertRaises(FatalException, ambari_server.change_objects_owner, args)
+    print_error_msg_mock.assert_any_call("stderr")
+    print_error_msg_mock.assert_any_call("stdout")
+
+  @patch.object(ambari_server, "run_os_command")
+  @patch.object(ambari_server, "print_error_msg")
+  def test_change_objects_owner_only_stdout(self,
+                                            print_error_msg_mock,
+                                            run_os_command_mock):
+    args = MagicMock()
+    stdout = " stdout "
+    stderr = ""
+    run_os_command_mock.return_value = 1, stdout, stderr
+
+    ambari_server.VERBOSE = True
+    self.assertRaises(FatalException, ambari_server.change_objects_owner, args)
+    print_error_msg_mock.assert_called_once_with("stdout")
+
+  @patch.object(ambari_server, "run_os_command")
+  @patch.object(ambari_server, "print_error_msg")
+  def test_change_objects_owner_only_stderr(self,
+                                            print_error_msg_mock,
+                                            run_os_command_mock):
+    args = MagicMock()
+    stdout = ""
+    stderr = " stderr "
+    run_os_command_mock.return_value = 1, stdout, stderr
+
+    ambari_server.VERBOSE = True
+    self.assertRaises(FatalException, ambari_server.change_objects_owner, args)
+    print_error_msg_mock.assert_called_once_with("stderr")
+

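A side note on the firewall tests above: they no longer stub FirewallChecks.run_os_command but patch subprocess.Popen itself, scripting communicate() and returncode on a single MagicMock. A minimal self-contained sketch of that pattern (service_is_active is illustrative, not from the patch):

from mock.mock import MagicMock, patch
import subprocess

def service_is_active(command):
    # Shells out; exit code 0 plus "active" on stdout counts as running.
    p = subprocess.Popen(command, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return p.returncode == 0 and "active" in out

with patch("subprocess.Popen") as popen_mock:
    p = MagicMock()
    popen_mock.return_value = p

    p.communicate.return_value = ("active", "err")   # scripted stdout/stderr
    p.returncode = 0
    assert service_is_active("service iptables status")

    p.communicate.return_value = ("", "err")         # simulate a stopped service
    p.returncode = 3
    assert not service_is_active("service iptables status")

Because communicate() and returncode live on the same MagicMock, each scenario is just a reassignment, which is exactly how the test steps through the Fedora, Ubuntu, Suse, and generic RedHat cases.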
+ 5 - 3
ambari-server/src/test/python/TestOSCheck.py

@@ -30,9 +30,11 @@ from mock.mock import patch
 from ambari_commons import OSCheck, OSConst
 import os_check_type

-with patch("platform.linux_distribution", return_value=('Suse', '11', 'Final')):
-  # We have to use this import HACK because the filename contains a dash
-  ambari_server = __import__('ambari-server')
+utils = __import__('ambari_server.utils').utils
+# We have to use this import HACK because the filename contains a dash
+with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
+  with patch.object(utils, "get_postgre_hba_dir"):
+    ambari_server = __import__('ambari-server')


 class TestOSCheck(TestCase):

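The TestOSCheck change is a reminder of why these suites wrap __import__ in patch contexts: ambari-server.py executes code at import time, and its dashed filename rules out a plain import statement. A stripped-down, runnable illustration of both points, using a throwaway module rather than the real one:

import os
import sys
import tempfile
from mock.mock import patch

# Create a module whose *import* runs platform detection, mirroring how
# ambari-server.py probes the OS in its module body.
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'my-module.py'), 'w') as f:
    f.write("import platform\nDISTRO = platform.linux_distribution()[0]\n")
sys.path.insert(0, tmpdir)

# The dash makes 'import my-module' a syntax error; __import__ takes a string.
# The patch must wrap the import itself -- module bodies run exactly once,
# so patching after the fact would be too late.
with patch("platform.linux_distribution", return_value=('Suse', '11', 'Final')):
    my_module = __import__('my-module')

assert my_module.DISTRO == 'Suse'

Attribute access then goes through the bound name (my_module.DISTRO), just as the suites do with ambari_server.FatalException.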
+ 38 - 8
ambari-server/src/test/python/TestUtils.py

@@ -19,7 +19,7 @@ limitations under the License.
 import StringIO
 import sys
 from unittest import TestCase
-from mock.mock import patch
+from mock.mock import patch, MagicMock


 utils = __import__('ambari_server.utils').utils
@@ -35,20 +35,50 @@ class TestUtils(TestCase):
     self.assertEqual('9.1', utils.get_ubuntu_pg_version())

   @patch('ambari_server.utils.get_ubuntu_pg_version')
-  def test_get_postgre_hba_dir(self, get_ubuntu_pg_version_mock):
-    utils.UBUNTU_PG_HBA_ROOT = '/tmp'
-    utils.PG_HBA_ROOT_DEFAULT = '/redhat/postgre/data'
+  @patch('os.path.isfile')
+  @patch("subprocess.Popen")
+  def test_get_postgre_hba_dir(self, popenMock, os_path_is_fine_mock,
+                               get_ubuntu_pg_version_mock):
+    p = MagicMock()
+    utils.PG_HBA_INIT_FILES['debian'] = '/tmp'
     get_ubuntu_pg_version_mock.return_value = '9.1'
-
-    self.assertEqual('/tmp/9.1/main', utils.get_postgre_hba_dir('ubuntu'))
-    self.assertEqual('/redhat/postgre/data', utils.get_postgre_hba_dir('redhat'))
+    self.assertEqual('/tmp/9.1/main', utils.get_postgre_hba_dir('debian'))
+
+    # ## Tests depend on postgres version ###
+    # 1) PGDATA=/var/lib/pgsql/data
+    os_path_is_fine_mock.return_value = True
+    utils.PG_HBA_ROOT_DEFAULT = '/def/dir'
+    p.communicate.return_value = ('/my/new/location\n', None)
+    p.returncode = 0
+    popenMock.return_value = p
+    self.assertEqual('/my/new/location', utils.get_postgre_hba_dir('redhat'))
+
+    # 2) No value set
+    os_path_is_fine_mock.return_value = True
+    utils.PG_HBA_ROOT_DEFAULT = '/def/dir'
+    p.communicate.return_value = ('\n', None)
+    p.returncode = 0
+    popenMock.return_value = p
+    self.assertEqual('/def/dir', utils.get_postgre_hba_dir('redhat'))
+
+    # 3) Value set - check diff systems
+    os_path_is_fine_mock.return_value = True
+    popenMock.reset()
+    p.communicate.return_value = (None, None)
+    utils.get_postgre_hba_dir('redhat')
+    popenMock.assert_called_with('alias exit=return; source /etc/rc.d/init.d/postgresql status &>/dev/null; echo $PGDATA', shell=True, stdin=-1, stderr=-1, stdout=-1)
+
+    popenMock.reset()
+    p.communicate.return_value = (None, None)
+    utils.get_postgre_hba_dir('suse')
+    popenMock.assert_called_with('alias exit=return; source /etc/init.d/postgresql status &>/dev/null; echo $PGDATA', shell=True, stdin=-1, stderr=-1, stdout=-1)

   @patch('ambari_server.utils.get_ubuntu_pg_version')
   def test_get_postgre_running_status(self, get_ubuntu_pg_version_mock):
     utils.PG_STATUS_RUNNING_DEFAULT = "red_running"
     get_ubuntu_pg_version_mock.return_value = '9.1'

-    self.assertEqual('9.1/main', utils.get_postgre_running_status('ubuntu'))
+    self.assertEqual('9.1/main', utils.get_postgre_running_status('debian'))
     self.assertEqual('red_running', utils.get_postgre_running_status('redhat'))

   @patch('os.path.isfile')

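For context on the commands the new assertions pin down: on redhat/suse the hba dir is apparently resolved by sourcing the PostgreSQL init script in a shell and echoing $PGDATA back, falling back to a default when nothing is set. A rough reconstruction of that lookup, inferred from the asserted Popen calls rather than copied from ambari_server.utils:

import subprocess

PG_HBA_ROOT_DEFAULT = '/var/lib/pgsql/data'   # assumed fallback value

def find_pgdata(init_script='/etc/rc.d/init.d/postgresql'):
    # 'exit' is aliased to 'return' so the sourced script cannot terminate
    # this shell; whatever PGDATA it exports is echoed back on stdout.
    cmd = "alias exit=return; source %s status &>/dev/null; echo $PGDATA" % init_script
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = p.communicate()
    out = (out or '').strip()
    return out if out else PG_HBA_ROOT_DEFAULT

The test drives exactly these two branches: a non-empty echo ('/my/new/location\n') wins, while an empty one falls back to PG_HBA_ROOT_DEFAULT; only the init-script path differs between the redhat and suse assertions.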
+ 4 - 9
ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py

@@ -21,7 +21,6 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *

-@patch.object(OSCheck,"get_os_type", new = MagicMock(return_value='suse'))
 class TestDatanode(RMFTestCase):

   def test_configure_default(self):
@@ -52,9 +51,8 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                               )
     self.assertNoMoreResources()

@@ -76,9 +74,8 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
                               not_if = None,
-                              user = 'hdfs',
                               )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
@@ -113,9 +110,8 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              user = 'root',
                               )
     self.assertNoMoreResources()

@@ -137,9 +133,8 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
                               not_if = None,
-                              user = 'root',
                               )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],

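The same mechanical rewrite repeats across the datanode, namenode, and snamenode expectations below: instead of passing user='hdfs' (or 'root') to Execute, the command itself is wrapped in su. A tiny helper showing the string construction these assertions now expect (the helper name is illustrative):

def su_wrap(user, cmd):
    # Run cmd in a login shell as 'user'. The command is single-quoted,
    # so cmd must not itself contain unescaped single quotes.
    return "su - %s -c '%s'" % (user, cmd)

cmd = ('export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && '
       '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode')
print su_wrap('hdfs', cmd)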
+ 4 - 9
ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py

@@ -21,7 +21,6 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *

-@patch.object(OSCheck,"get_os_type", new = MagicMock(return_value='suse'))
 class TestNamenode(RMFTestCase):

   def test_configure_default(self):
@@ -62,9 +61,8 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                               )
     self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
                               tries = 40,
@@ -110,9 +108,8 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode\'',
                               not_if = None,
-                              user = 'hdfs',
                               )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                               action = ['delete'],
@@ -157,9 +154,8 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                               )
                               )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
                               user = 'hdfs',
                               user = 'hdfs',
@@ -208,9 +204,8 @@ class TestNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                              action = ['delete'],
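The recurring pattern in these test updates: the Execute resource no longer receives a separate user= keyword argument; the daemon command itself is now wrapped in su - <user> -c '...'. A minimal sketch of the command shape the new assertions expect, assuming a hypothetical helper name (build_daemon_cmd); the real logic lives in the stack's service scripts:

def build_daemon_cmd(action, component, run_user,
                     libexec_dir='/usr/lib/hadoop/libexec',
                     daemon='/usr/lib/hadoop/bin/hadoop-daemon.sh',
                     conf_dir='/etc/hadoop/conf'):
    # The inner command is run through `su - <user> -c '...'`
    # rather than being handed to Execute via user=.
    inner = ('export HADOOP_LIBEXEC_DIR=%s && %s --config %s %s %s'
             % (libexec_dir, daemon, conf_dir, action, component))
    return "ulimit -c unlimited;  su - %s -c '%s'" % (run_user, inner)

# Matches the updated namenode start assertion above:
assert build_daemon_cmd('start', 'namenode', 'hdfs') == (
    "ulimit -c unlimited;  su - hdfs -c 'export HADOOP_LIBEXEC_DIR="
    "/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh "
    "--config /etc/hadoop/conf start namenode'")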

+ 4 - 9
ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py

@@ -21,7 +21,6 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *

-@patch.object(OSCheck,"get_os_type", new = MagicMock(return_value='suse'))
 class TestSNamenode(RMFTestCase):

   def test_configure_default(self):
@@ -62,9 +61,8 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                              )
    self.assertNoMoreResources()

@@ -86,9 +84,8 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
                              action = ['delete'],
@@ -133,9 +130,8 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                              )
    self.assertNoMoreResources()

@@ -157,9 +153,8 @@ class TestSNamenode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
                              action = ['delete'],

+ 10 - 4
ambari-server/src/test/python/stacks/1.3.2/PIG/test_pig_client.py

@@ -37,8 +37,11 @@ class TestPigClient(RMFTestCase):
      owner = 'hdfs',
      content = InlineTemplate(self.getConfig()['configurations']['pig-env']['content'])
    )
-    self.assertResourceCalled('TemplateConfig', '/etc/pig/conf/pig.properties',
-      owner = 'hdfs',
+    self.assertResourceCalled('File', '/etc/pig/conf/pig.properties',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = 'pigproperties\nline2'
    )
    self.assertResourceCalled('File', '/etc/pig/conf/log4j.properties',
      owner = 'hdfs',
@@ -63,8 +66,11 @@ class TestPigClient(RMFTestCase):
      owner = 'hdfs',
      content = InlineTemplate(self.getConfig()['configurations']['pig-env']['content']),
    )
-    self.assertResourceCalled('TemplateConfig', '/etc/pig/conf/pig.properties',
-      owner = 'hdfs',
+    self.assertResourceCalled('File', '/etc/pig/conf/pig.properties',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              content = 'pigproperties\nline2'
    )
    self.assertResourceCalled('File', '/etc/pig/conf/log4j.properties',
      owner = 'hdfs',
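In both pig client tests, the TemplateConfig resource for pig.properties is replaced by a plain File resource: the content now comes straight from the new pig-properties configuration (added to the JSON fixtures below) rather than a bundled template, with ownership and permissions pinned explicitly. A rough sketch of that flow, assuming a config dict shaped like the test fixtures:

config = {'configurations': {'pig-properties': {'content': 'pigproperties\nline2'}}}

# File resource arguments the updated assertions check; 0o644 is the
# modern spelling of the 0644 literal in the Python 2 test source.
pig_properties_file = dict(
    path='/etc/pig/conf/pig.properties',
    owner='hdfs',
    group='hadoop',
    mode=0o644,
    content=config['configurations']['pig-properties']['content'],
)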

+ 2 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json

@@ -296,7 +296,8 @@
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
         }, 
         }, 
         "sqoop-env": {
         "sqoop-env": {
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    "
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    ",
+            "sqoop_user": "sqoop"
         }, 
         }, 
         "mapred-env": {
         "mapred-env": {
             "mapreduce_userlog_retainhours": "24", 
             "mapreduce_userlog_retainhours": "24", 

+ 5 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/default.json

@@ -296,7 +296,8 @@
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
         }, 
         }, 
         "sqoop-env": {
         "sqoop-env": {
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    "
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    ",
+            "sqoop_user": "sqoop"
         }, 
         }, 
         "mapred-env": {
         "mapred-env": {
             "mapreduce_userlog_retainhours": "24", 
             "mapreduce_userlog_retainhours": "24", 
@@ -421,6 +422,9 @@
         "pig-log4j": {
         "pig-log4j": {
             "content": "log4jproperties\nline2"
             "content": "log4jproperties\nline2"
         },
         },
+        "pig-properties": {
+          "content": "pigproperties\nline2"
+        },
         "oozie-log4j": {
         "oozie-log4j": {
             "content": "log4jproperties\nline2"
             "content": "log4jproperties\nline2"
         }
         }

+ 2 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json

@@ -296,7 +296,8 @@
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
         }, 
         }, 
         "sqoop-env": {
         "sqoop-env": {
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    "
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    ",
+            "sqoop_user": "sqoop"
         }, 
         }, 
         "mapred-env": {
         "mapred-env": {
             "mapreduce_userlog_retainhours": "24", 
             "mapreduce_userlog_retainhours": "24", 

+ 5 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/secured.json

@@ -474,7 +474,8 @@
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
             "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n    "
         }, 
         }, 
         "sqoop-env": {
         "sqoop-env": {
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    "
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n    ",
+            "sqoop_user": "sqoop"
         }, 
         }, 
         "mapred-env": {
         "mapred-env": {
             "mapreduce_userlog_retainhours": "24", 
             "mapreduce_userlog_retainhours": "24", 
@@ -606,6 +607,9 @@
         "pig-log4j": {
         "pig-log4j": {
             "content": "log4jproperties\nline2"
             "content": "log4jproperties\nline2"
         },
         },
+        "pig-properties": {
+          "content": "pigproperties\nline2"
+        },
         "oozie-log4j": {
         "oozie-log4j": {
             "content": "log4jproperties\nline2"
             "content": "log4jproperties\nline2"
         }
         }

+ 5 - 0
ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py

@@ -133,5 +133,10 @@ class TestHookBeforeInstall(RMFTestCase):
        gid = 'hadoop',
        ignore_failures = False,
    )
+    self.assertResourceCalled('User', 'sqoop',
+        gid = 'hadoop',
+        ignore_failures = False,
+        groups = ['hadoop'],
+    )
    self.assertResourceCalled('Package', 'unzip',)
    self.assertNoMoreResources()
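The before-INSTALL hook now also creates a sqoop user, which is why every JSON fixture above gains a "sqoop_user": "sqoop" key under sqoop-env. A hedged sketch of how such a user list might be assembled from configuration (the function name and structure are illustrative, not the hook's actual code):

def service_users(configurations):
    # Collect per-service user names declared in *-env configs;
    # each becomes a User resource with gid/groups 'hadoop'.
    users = []
    sqoop_env = configurations.get('sqoop-env', {})
    if 'sqoop_user' in sqoop_env:
        users.append(sqoop_env['sqoop_user'])
    return users

assert service_users({'sqoop-env': {'sqoop_user': 'sqoop'}}) == ['sqoop']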

+ 4 - 9
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py

@@ -21,7 +21,6 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *

-@patch.object(OSCheck,"get_os_type", new = MagicMock(return_value='suse'))
 class TestDatanode(RMFTestCase):

   def test_configure_default(self):
@@ -52,9 +51,8 @@ class TestDatanode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                              )
    self.assertNoMoreResources()

@@ -76,9 +74,8 @@ class TestDatanode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                              action = ['delete'],
@@ -113,9 +110,8 @@ class TestDatanode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              user = 'root',
                              )
    self.assertNoMoreResources()

@@ -137,9 +133,8 @@ class TestDatanode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
                              not_if = None,
-                              user = 'root',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                              action = ['delete'],
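Note the asymmetry in the datanode tests: the default (unsecured) cases wrap the daemon command in su - hdfs, while the secured cases use su - root, presumably because a Kerberized datanode is launched as root (e.g., to bind privileged ports). A one-line illustration of the choice these assertions encode (function name is hypothetical):

def datanode_run_user(security_enabled):
    # secured clusters start the datanode as root; otherwise as hdfs
    return 'root' if security_enabled else 'hdfs'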

+ 4 - 9
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py

@@ -21,7 +21,6 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *

-@patch.object(OSCheck,"get_os_type", new = MagicMock(return_value='suse'))
 class TestJournalnode(RMFTestCase):

   def test_configure_default(self):
@@ -52,9 +51,8 @@ class TestJournalnode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                              )
    self.assertNoMoreResources()

@@ -76,9 +74,8 @@ class TestJournalnode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
                              action = ['delete'],
@@ -113,9 +110,8 @@ class TestJournalnode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
-                              user = 'hdfs',
                              )
    self.assertNoMoreResources()

@@ -137,9 +133,8 @@ class TestJournalnode(RMFTestCase):
                              action = ['delete'],
                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode\'',
                              not_if = None,
-                              user = 'hdfs',
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
                              action = ['delete'],

Some files were not shown because too many files changed in this diff