
AMBARI-9599. Install fails with custom user/group (aonishuk)

Andrew Onishuk, 10 years ago
parent commit 58e1da97e1

+ 4 - 0
ambari-common/src/main/python/resource_management/core/providers/accounts.py

@@ -63,6 +63,10 @@ class UserProvider(Provider):
       if option_flag and option_value:
         command += [option_flag, str(option_value)]
 
+    # if trying to modify existing user, but no values to modify are provided
+    if self.user and len(command) == 1:
+      return
+
     command.append(self.resource.username)
 
     shell.checked_call(command, sudo=True)

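The new early return above skips the call when an existing user is being modified but no option flags were accumulated. A minimal standalone sketch of that guard, using plain subprocess instead of Ambari's resource_management Provider API (the function name and the options dict here are illustrative, not part of the codebase):

    # Sketch only: mirrors the option loop and the new guard in UserProvider.
    import subprocess

    def modify_existing_user(username, options):
        command = ["usermod"]
        # Only flags that actually have a value are appended.
        for option_flag, option_value in options.items():
            if option_flag and option_value:
                command += [option_flag, str(option_value)]
        # The added early return: with nothing to change, a bare
        # "usermod <name>" would be run, which usermod rejects.
        if len(command) == 1:
            return
        command.append(username)
        subprocess.check_call(["sudo"] + command)
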
+ 0 - 35
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java

@@ -302,44 +302,9 @@ public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
     // remove NAGIOS to make way for the new embedded alert framework
     removeNagiosService();
     addNewConfigurationsFromXml();
-    updateDfsClusterAdmintistratorsProperty();
     updateHiveDatabaseType();
     setSecurityType();
   }
-  
-  protected void updateDfsClusterAdmintistratorsProperty() throws AmbariException {
-    /*
-     * Remove trailing and leading whitespaces from hdfs-site/dfs.cluster.administrators
-     * property.
-     */
-    AmbariManagementController ambariManagementController = injector.getInstance(
-        AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<String, String>();
-      String properyValue = null;
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          properyValue = null;
-          if (cluster.getDesiredConfigByType("hdfs-site") != null) {
-            properyValue = cluster.getDesiredConfigByType(
-                "hdfs-site").getProperties().get("dfs.cluster.administrators");
-          }
-
-          if (properyValue != null) {
-            properyValue = properyValue.trim();
-
-            prop.put("dfs.cluster.administrators", properyValue);
-            updateConfigurationPropertiesForCluster(cluster, "hdfs-site",
-                prop, true, false);
-          }
-        }
-      }
-    }
-  }
 
   protected void updateHiveDatabaseType() throws AmbariException {
     final String PROPERTY_NAME = "hive_database_type";

+ 1 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml

@@ -326,8 +326,7 @@
 
   <property>
     <name>dfs.cluster.administrators</name>
-    <value>hdfs</value>
-    <property-type>GROUP</property-type>
+    <value> hdfs</value>
     <description>ACL for who all can view the default servlets in the HDFS</description>
   </property>
 
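For reference: as the new before-ANY hook docstring further down documents, this value follows the format "<comma-delimited list of usernames><space><comma-delimited list of group names>", so the new default " hdfs" (note the leading space) means an empty user list and the single group hdfs. A fully populated value, matching the updated test config below, would be "test_user1,test_user2 hdfs,test_group".
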

+ 0 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -172,8 +172,6 @@ if has_tez:
   user_to_groups_dict[tez_user] = [proxyuser_group]
 if has_oozie_server:
   user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_namenode:
-  user_to_groups_dict[hdfs_user] = [dfs_cluster_administrators_group]
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
 

+ 35 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py

@@ -18,6 +18,8 @@ limitations under the License.
 """
 
 import os
+import re
+from copy import copy
 
 from resource_management import *
 
@@ -83,6 +85,39 @@ def setup_users():
                cd_access="a",
     )
     set_uid(params.hbase_user, params.hbase_user_dirs)
+
+  if params.has_namenode:
+    create_dfs_cluster_admins()
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  parts = re.split('\s', params.dfs_cluster_administrators_group)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+         ignore_failures = params.ignore_groupsusers_create
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+         ignore_failures = params.ignore_groupsusers_create
+    )
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list
+  )
+
+
+
     
 def set_uid(user, user_dirs):
   """

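A minimal sketch of the parsing that create_dfs_cluster_admins performs, stripped of the Ambari User/Group resources so it runs standalone; parse_dfs_cluster_administrators is an illustrative name, and the sample values come from the new hdfs-site.xml default and the updated test config below:

    import re

    def parse_dfs_cluster_administrators(value):
        # "<comma-delimited users><space><comma-delimited groups>";
        # either part may be empty, e.g. the new default " hdfs" has no users.
        parts = re.split(r'\s', value)
        if len(parts) == 1:
            parts.append("")
        users = parts[0].split(",") if parts[0] else []
        groups = parts[1].split(",") if parts[1] else []
        return users, groups

    print(parse_dfs_cluster_administrators(" hdfs"))
    # ([], ['hdfs'])
    print(parse_dfs_cluster_administrators("test_user1,test_user2 hdfs,test_group"))
    # (['test_user1', 'test_user2'], ['hdfs', 'test_group'])
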
+ 0 - 5
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java

@@ -326,23 +326,18 @@ public class UpgradeCatalog200Test {
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod
         ("addNewConfigurationsFromXml");
     Method setSecurityType = UpgradeCatalog200.class.getDeclaredMethod("setSecurityType");
-    Method updateDfsClusterAdmintistratorsProperty = UpgradeCatalog200.class.getDeclaredMethod("updateDfsClusterAdmintistratorsProperty");
 
     UpgradeCatalog200 upgradeCatalog = createMockBuilder(UpgradeCatalog200.class)
         .addMockedMethod(removeNagiosService)
         .addMockedMethod(updateHiveDatabaseType)
         .addMockedMethod(addNewConfigurationsFromXml)
         .addMockedMethod(setSecurityType)
-        .addMockedMethod(updateDfsClusterAdmintistratorsProperty)
         .createMock();
 
     upgradeCatalog.removeNagiosService();
     expectLastCall().once();
     upgradeCatalog.addNewConfigurationsFromXml();
     expectLastCall();
-
-    upgradeCatalog.updateDfsClusterAdmintistratorsProperty();
-    expectLastCall();
     
     upgradeCatalog.updateHiveDatabaseType();
     expectLastCall().once();

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/default.json

@@ -241,7 +241,7 @@
             "dfs.block.access.token.enable": "true", 
             "dfs.support.append": "true", 
             "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
-            "dfs.cluster.administrators": " hdfs", 
+            "dfs.cluster.administrators": "test_user1,test_user2 hdfs,test_group",
             "dfs.replication": "3", 
             "ambari.dfs.datanode.http.port": "50075", 
             "dfs.datanode.balance.bandwidthPerSec": "6250000", 

+ 17 - 2
ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py

@@ -71,9 +71,9 @@ class TestHookBeforeInstall(RMFTestCase):
         groups = [u'hadoop'],
     )
     self.assertResourceCalled('User', 'hdfs',
-        gid = 'hadoop',
         ignore_failures = False,
-        groups = [u' hdfs'],
+        gid = 'hadoop',
+        groups = [u'hadoop'],
     )
     self.assertResourceCalled('User', 'storm',
         gid = 'hadoop',
@@ -140,6 +140,21 @@ class TestHookBeforeInstall(RMFTestCase):
     self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/hadoop/hbase',
         not_if = 'test $(id -u hbase) -gt 1000',
     )
+    self.assertResourceCalled('User', 'test_user1',
+        ignore_failures = False
+    )
+    self.assertResourceCalled('User', 'test_user2',
+        ignore_failures = False
+    )
+    self.assertResourceCalled('Group', 'hdfs',
+        ignore_failures = False,
+    )
+    self.assertResourceCalled('Group', 'test_group',
+        ignore_failures = False,
+    )
+    self.assertResourceCalled('User', 'hdfs',
+        groups = [u'hadoop', u'hdfs', u'test_group'],
+    )
     self.assertResourceCalled('Directory', '/etc/hadoop',
         mode = 0755
     )