
AMBARI-19613. ZKFC ZooKeeper connection is not secure (Laszlo Puskas via magyari_sandor)

Laszlo Puskas 8 years ago
Parent
Current commit
00b2c42ccf

+ 5 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml

@@ -375,6 +375,11 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{
 if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
   ulimit -l {{datanode_max_locked_memory}}
 fi
+{% endif %}
+
+# Enable ACLs on ZooKeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+      export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS"
 {% endif %}
     </value>
     <value-attributes>

+ 2 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/kerberos.json

@@ -24,7 +24,8 @@
           "core-site": {
             "hadoop.security.authentication": "kerberos",
             "hadoop.security.authorization": "true",
-            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
+            "ha.zookeeper.acl":"sasl:nn:rwcda"
           }
         }
       ],
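
The added ha.zookeeper.acl property makes the ZooKeeper failover controller protect its znodes under /hadoop-ha with an ACL granting read/write/create/delete/admin (rwcda) only to the SASL-authenticated "nn" identity. A rough, illustrative way to check this on a running secure cluster (client path and nameservice name are examples, not part of this patch):

    # from a ZooKeeper client host; "mycluster" stands in for the real nameservice
    /usr/hdp/current/zookeeper-client/bin/zkCli.sh -server localhost:2181
    getAcl /hadoop-ha/mycluster
    # expected output, approximately:
    #   'sasl,'nn
    #   : cdrwa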

+ 24 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py

@@ -28,12 +28,12 @@ from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.core import shell
 from resource_management.core.shell import as_user, as_sudo
+from resource_management.core.source import Template
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
 from resource_management.libraries.functions.show_logs import show_logs
 from ambari_commons.inet_utils import ensure_ssl_using_protocol
 from zkfc_slave import ZkfcSlaveDefault
@@ -382,3 +382,25 @@ def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
   else:
     dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
   return dfsadmin_base_command
+
+
+def set_up_zkfc_security(params):
+    """ Sets up security for accessing zookeper on secure clusters """
+
+    # check if the NameNode is HA (this may be redundant, as ZKFC is only installed when HA is enabled)
+    if params.dfs_ha_enabled is False:
+        Logger.info("The namenode is not HA, zkfc security setup skipped.")
+        return
+
+    # check if the cluster is secure (skip otherwise)
+    if params.security_enabled is False:
+        Logger.info("The cluster is not secure, zkfc security setup skipped.")
+        return
+
+    # process the JAAS template
+    File(os.path.join(params.hadoop_conf_secure_dir, 'hdfs_jaas.conf'),
+         owner='root',
+         group='root',
+         mode=0644,
+         content=Template("hdfs_jaas.conf.j2")
+         )

+ 5 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py

@@ -36,9 +36,9 @@ from resource_management.libraries.functions.security_commons import get_params_
 from resource_management.libraries.functions.security_commons import validate_security_config_properties
 from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version_select_util import get_component_version
+
+
 
 class ZkfcSlave(Script):
   def get_component_name(self):
@@ -61,6 +61,9 @@ class ZkfcSlave(Script):
     import params
     env.set_params(params)
     hdfs("zkfc_slave")
+
+    # set up failover / ZooKeeper ACLs
+    utils.set_up_zkfc_security(params)
     pass
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)

+ 27 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/hdfs_jaas.conf.j2

@@ -0,0 +1,27 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+      com.sun.security.auth.module.Krb5LoginModule required
+      useKeyTab=true
+      storeKey=true
+      useTicketCache=false
+      keyTab="{{nn_keytab}}"
+      principal="{{nn_principal_name}}";
+};
+
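
When this template is rendered, nn_keytab and nn_principal_name resolve to the NameNode service keytab and principal taken from the cluster's Kerberos configuration; the result looks roughly like the sketch below (keytab path and principal are illustrative values only):

    Client {
          com.sun.security.auth.module.Krb5LoginModule required
          useKeyTab=true
          storeKey=true
          useTicketCache=false
          keyTab="/etc/security/keytabs/nn.service.keytab"
          principal="nn/namenode-host.example.com@EXAMPLE.COM";
    };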

+ 9 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -31,9 +31,7 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.expect import expect
-from ambari_commons.os_check import OSCheck
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 
@@ -181,6 +179,8 @@ oozie_servers = default("/clusterHostInfo/oozie_server", [])
 falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
 
 has_namenode = not len(namenode_host) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
@@ -190,9 +190,11 @@ has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
 has_zeppelin_master = not len(zeppelin_master_hosts) == 0
+has_zkfc_hosts = not len(zkfc_hosts) == 0
 
 if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+    hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+    hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 
 hbase_tmp_dir = "/tmp/hbase-hbase"
 
@@ -235,3 +237,7 @@ host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
 override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+
+# if NN HA on a secure cluster, access ZooKeeper securely
+if has_zkfc_hosts and security_enabled:
+    hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
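
With these parameters set, the Jinja block added to hadoop-env.xml above renders roughly as follows on a secure HA cluster (the secure conf dir is shown as /etc/hadoop/conf/secure, matching the path asserted in the test below):

    # Enable ACLs on ZooKeeper znodes if required
    export HADOOP_ZKFC_OPTS="-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config=/etc/hadoop/conf/secure/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client $HADOOP_ZKFC_OPTS"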

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -179,6 +179,11 @@ export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
 if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
   ulimit -l {{datanode_max_locked_memory}}
 fi
+{% endif %}
+
+# Enable ACLs on ZooKeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+  export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}}"
 {% endif %}
     </value>
     <value-attributes>

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml

@@ -156,6 +156,11 @@ if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$H
   {% endif %}
   ulimit -n {{hdfs_user_nofile_limit}}
 fi
+
+# Enable ACLs on ZooKeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+  export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

+ 5 - 0
ambari-server/src/main/resources/stacks/HDP/2.4/services/HDFS/configuration/hadoop-env.xml

@@ -156,6 +156,11 @@ if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$H
   {% endif %}
   ulimit -n {{hdfs_user_nofile_limit}}
 fi
+
+# Enable ACLs on ZooKeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+  export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/kerberos.json

@@ -24,7 +24,8 @@
           "core-site": {
             "hadoop.security.authentication": "kerberos",
             "hadoop.security.authorization": "true",
-            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
+            "ha.zookeeper.acl":"sasl:nn:rwcda"
           }
         },
         {

+ 7 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py

@@ -174,6 +174,13 @@ class TestZkfc(RMFTestCase):
                               owner = 'root',
                               )
 
+    self.assertResourceCalled('File', '/etc/hadoop/conf/secure/hdfs_jaas.conf',
+                              owner='root',
+                              group='root',
+                              mode=0644,
+                              content=Template("hdfs_jaas.conf.j2")
+                              )
+
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',