- #!/usr/bin/env python
- """
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- """
import functions
from functions import calc_xmn_from_xms
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
import status_params
# server configurations: the full command JSON pushed down by the Ambari server
config = Script.get_config()
# scratch directory for command-execution artifacts on this host
exec_tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)
# "version" is only populated for upgrade/downgrade commands
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
# normalized, comparable form (e.g. "2.2.0.0") -- used for feature gating below
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
# hadoop/hbase binary locations.
# HDP 2.2+ installs versioned bits under /usr/hdp/current; older stacks use
# the legacy /usr/lib and /usr/bin locations.
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
  _hbase_bin_dir = "/usr/hdp/current/hbase-client/bin"
else:
  hadoop_bin_dir = "/usr/bin"
  _hbase_bin_dir = "/usr/lib/hbase/bin"

daemon_script = _hbase_bin_dir + "/hbase-daemon.sh"
region_mover = _hbase_bin_dir + "/region_mover.rb"
region_drainer = _hbase_bin_dir + "/draining_servers.rb"
hbase_cmd = _hbase_bin_dir + "/hbase"

hadoop_conf_dir = "/etc/hadoop/conf"
hbase_conf_dir_prefix = "/etc/hbase"
hbase_conf_dir = hbase_conf_dir_prefix + "/conf"
# decommission support: hosts to drain regions from / re-include
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = default("/commandParams/mark_draining_only",False)
hbase_included_hosts = config['commandParams']['included_hosts']

hbase_user = status_params.hbase_user
hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
smokeuser = config['configurations']['cluster-env']['smokeuser']
# NOTE(review): _authentication is never referenced later in this chunk --
# possibly vestigial; confirm against the rest of the file before removing
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = config['configurations']['cluster-env']['security_enabled']
# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"

# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']

log_dir = config['configurations']['hbase-env']['hbase_log_dir']
master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
# presumably the JVM young-gen (-Xmn) size derived from the heap size and
# capped at regionserver_xmn_max -- see functions.calc_xmn_from_xms
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)

pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
# TODO UPGRADE default, update site during upgrade
_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
# expand ${...} placeholders (e.g. ${hbase.tmp.dir}) against hbase-site values
local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])

client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
# Ambari Metrics collector endpoint (absent when AMS is not installed).
ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
# Fix: replaced the unidiomatic `not len(...) == 0` with a direct length test
# and dropped the stray top-level `pass` that followed the if-block.
has_metric_collector = len(ams_collector_hosts) > 0
if has_metric_collector:
  metric_collector_host = ams_collector_hosts[0]
  # webapp address may be "host:port" or a bare port; keep only the port part
  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:8188")
  if metric_collector_port and metric_collector_port.find(':') != -1:
    metric_collector_port = metric_collector_port.split(':')[1]
# if hbase is selected the hbase_rs_hosts should not be empty, but still
# default just in case: region servers are assumed to be co-located with the
# slaves (or, failing that, all hosts).
# Fix: default()'s 2nd argument is a literal fallback VALUE, not a second
# lookup path -- the original returned the string '/clusterHostInfo/...'
# whenever hbase_rs_hosts was absent. Fall back to the actual host lists.
if 'slave_hosts' in config['clusterHostInfo']:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', config['clusterHostInfo']['slave_hosts'])
else:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', default('/clusterHostInfo/all_hosts', []))

smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"  # HBase ACL string: Read/Write/eXec/Create/Admin
# unique token used by the service check to verify a round-trip write
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]
# Kerberos identities: principals carry a _HOST placeholder that must be
# replaced with the lower-cased local hostname on secure clusters.
if security_enabled:
  _hostname_lowercase = config['hostname'].lower()
  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)

# keytab locations are bound unconditionally so templates can always render
master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']

# locate kinit on this host (first match among the usual install dirs)
kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
if security_enabled:
  # shell prefix prepended to hbase commands to obtain a ticket first
  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
else:
  kinit_cmd = ""
# log4j.properties content, if the hbase-log4j config type is present
if 'hbase-log4j' in config['configurations'] and 'content' in config['configurations']['hbase-log4j']:
  log4j_props = config['configurations']['hbase-log4j']['content']
else:
  log4j_props = None

# template body for hbase-env.sh
hbase_env_sh_template = config['configurations']['hbase-env']['content']

hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
hbase_staging_dir = "/apps/hbase/staging"

# for create_hdfs_directory
hostname = config["hostname"]
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
# Fix: dropped a second functions.get_kinit_path(...) call that recomputed
# kinit_path_local with arguments identical to the earlier security section.
import functools

# Pre-bind the cluster-wide arguments shared by every HdfsDirectory call so
# call sites (params.HdfsDirectory in service code) only pass the
# directory-specific parameters.
_hdfs_directory_common_args = dict(
  conf_dir=hadoop_conf_dir,
  hdfs_user=hdfs_user,
  security_enabled=security_enabled,
  keytab=hdfs_user_keytab,
  kinit_path_local=kinit_path_local,
  bin_dir=hadoop_bin_dir,
)
HdfsDirectory = functools.partial(HdfsDirectory, **_hdfs_directory_common_args)
# Fix: the HDP >= 2.2 stack check was evaluated twice back-to-back with no
# intervening mutation of hdp_stack_version -- merged into one conditional.
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  # Master/regionserver daemons must use their role-specific
  # /usr/hdp/current symlink rather than the client one.
  command_role = default("/role", "")
  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
    daemon_script = format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")

  # Ranger HBase plugin flag (only meaningful on HDP 2.2+); anything other
  # than an explicit yes/no leaves the plugin disabled.
  enable_ranger_hbase = False
  user_input = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
  if user_input.lower() == 'yes':
    enable_ranger_hbase = True
  elif user_input.lower() == 'no':
    enable_ranger_hbase = False

# ranger admin host (not passed when Ranger is absent)
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = len(ranger_admin_hosts) > 0
|