params.py

#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functions import calc_xmn_from_xms
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
import status_params

# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
# hadoop params
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
else:
  hadoop_bin_dir = "/usr/bin"
  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
  hbase_cmd = "/usr/lib/hbase/bin/hbase"

hadoop_conf_dir = "/etc/hadoop/conf"
hbase_conf_dir_prefix = "/etc/hbase"
hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = default("/commandParams/mark_draining_only", False)
hbase_included_hosts = config['commandParams']['included_hosts']

hbase_user = status_params.hbase_user
hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
smokeuser = config['configurations']['cluster-env']['smokeuser']
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = config['configurations']['cluster-env']['security_enabled']

# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"

# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']

log_dir = config['configurations']['hbase-env']['hbase_log_dir']
master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']

regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
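# A worked example, assuming calc_xmn_from_xms scales the heap size by the ratio
# and caps the result at the max (see functions.py for the actual rounding rule):
# with a 4096m regionserver heap, a 0.2 ratio, and a 512m cap, the raw young
# generation size would be ~819m, so the cap wins and xmn is set to 512m.
# (All figures here are illustrative, not values from this configuration.)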
pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
# TODO UPGRADE default, update site during upgrade
_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
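# Illustration: substitute_vars expands ${...} references against the given
# dictionary, so if hbase-site set hbase.tmp.dir to /hadoop/hbase (a made-up
# value), the default "${hbase.tmp.dir}/local" would resolve to "/hadoop/hbase/local".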
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])  # not passed when ganglia is not present
ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]

ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
  metric_collector_host = ams_collector_hosts[0]
  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:8188")
  if metric_collector_port and metric_collector_port.find(':') != -1:
    metric_collector_port = metric_collector_port.split(':')[1]
  pass
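# Example: with the default webapp address "0.0.0.0:8188", the split above
# strips the bind host and leaves metric_collector_port == "8188".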
# if hbase is selected, hbase_rs_hosts should not be empty, but default just in case
if 'slave_hosts' in config['clusterHostInfo']:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts')  # if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as slaves
else:
  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]

if security_enabled:
  _hostname_lowercase = config['hostname'].lower()
  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST', _hostname_lowercase)
  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST', _hostname_lowercase)
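  # Example: a configured principal of "hbase/_HOST@EXAMPLE.COM" (hypothetical
  # realm) on host Host1.Example.com becomes "hbase/host1.example.com@EXAMPLE.COM".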
master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
if security_enabled:
  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
else:
  kinit_cmd = ""
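# Illustration: with security enabled this typically yields a command such as
# "/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase;"
# (the keytab path and principal shown here are examples, not cluster values).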
# log4j.properties
if 'hbase-log4j' in config['configurations'] and 'content' in config['configurations']['hbase-log4j']:
  log4j_props = config['configurations']['hbase-log4j']['content']
else:
  log4j_props = None

hbase_env_sh_template = config['configurations']['hbase-env']['content']

hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
hbase_staging_dir = "/apps/hbase/staging"

# for create_hdfs_directory
hostname = config["hostname"]
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
import functools
# create a partial function with common arguments for every HdfsDirectory call;
# to create an HDFS directory, service code calls params.HdfsDirectory
HdfsDirectory = functools.partial(
  HdfsDirectory,
  conf_dir=hadoop_conf_dir,
  hdfs_user=hdfs_user,
  security_enabled=security_enabled,
  keytab=hdfs_user_keytab,
  kinit_path_local=kinit_path_local,
  bin_dir=hadoop_bin_dir
)
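# A minimal sketch of the intended call pattern (the action keywords follow the
# HdfsDirectory resource's create_delayed/create convention; the directory and
# user parameters are the ones defined above):
#
#   params.HdfsDirectory(params.hbase_hdfs_root_dir,
#                        action="create_delayed",
#                        owner=params.hbase_user)
#   params.HdfsDirectory(params.hbase_staging_dir,
#                        action="create_delayed",
#                        owner=params.hbase_user,
#                        mode=0711)
#   params.HdfsDirectory(None, action="create")  # flush the delayed creates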
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  command_role = default("/role", "")
  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
    daemon_script = format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
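    # Example: for command_role == "HBASE_MASTER", role_root is "master" and
    # daemon_script resolves to /usr/hdp/current/hbase-master/bin/hbase-daemon.sh.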
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  # set the flag for the Ranger HBase plugin
  enable_ranger_hbase = False
  user_input = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
  if user_input.lower() == 'yes':
    enable_ranger_hbase = True
  elif user_input.lower() == 'no':
    enable_ranger_hbase = False

# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0