
AMBARI-10367 - [WinTP2] Merge HDPWIN HBASE package scripts to common services

Artem Baranchuk 10 years ago
parent
commit
f126bec036
18 changed files with 416 additions and 743 deletions
  1. +16 -6
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
  2. +22 -15
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
  3. +22 -3
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
  4. +38 -21
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
  5. +34 -11
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
  6. +4 -226
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
  7. +247 -0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
  8. +1 -0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
  9. +18 -0
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
  10. +14 -9
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
  11. +0 -164
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
  12. +0 -30
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
  13. +0 -36
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
  14. +0 -66
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
  15. +0 -52
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
  16. +0 -49
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
  17. +0 -34
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
  18. +0 -21
      ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py

+ 16 - 6
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py

@@ -18,12 +18,24 @@ limitations under the License.
 
 """
 import os
-
 from resource_management import *
 import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
 
-def hbase(name=None # 'master' or 'regionserver' or 'client'
-              ):
+# name is 'master' or 'regionserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
   import params
 
   Directory( params.hbase_conf_dir_prefix,
@@ -155,9 +167,7 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
     )
     params.HdfsDirectory(None, action="create")
 
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
+def hbase_TemplateConfig(name, tag=None):
   import params
 
   TemplateConfig( format("{hbase_conf_dir}/{name}"),

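Note: the hunk above splits hbase() into two OS-specific variants selected by the OsFamilyFuncImpl decorator from ambari_commons. As a minimal sketch of the dispatch idea (illustrative only, not Ambari's actual implementation), a registry keyed by function name and OS family is enough; the DEFAULT registration serves as the fallback for families without their own variant:

import platform

_IMPLS = {}

def os_family_func_impl(os_family):
  # register one variant per OS family; dispatch when the name is called
  def decorator(func):
    _IMPLS.setdefault(func.__name__, {})[os_family] = func
    def dispatcher(*args, **kwargs):
      family = "winsrv" if platform.system() == "Windows" else "default"
      variants = _IMPLS[func.__name__]
      return variants.get(family, variants["default"])(*args, **kwargs)
    return dispatcher
  return decorator

@os_family_func_impl("winsrv")
def hbase(name=None):
  print("winsrv variant: write hbase-site.xml only")

@os_family_func_impl("default")
def hbase(name=None):
  print("default variant: full directory and config setup")

hbase(name="master")  # runs whichever variant matches the current OS
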
+ 22 - 15
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py

@@ -20,12 +20,32 @@ limitations under the License.
 
 import sys
 from resource_management import *
-
 from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
 
-         
 class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
 
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
   def get_stack_to_component(self):
     return {"HDP": "hbase-client"}
 
@@ -41,19 +61,6 @@ class HbaseClient(Script):
       # is also set
       Execute(format("hdp-select set hadoop-client {version}"))
 
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(name='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
 
 if __name__ == "__main__":
   HbaseClient().execute()

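Note: the client script uses the class-level counterpart of the same pattern. Shared lifecycle methods (install, configure, status) stay on HbaseClient, while the OsFamilyImpl-decorated subclasses carry the platform specifics and are what actually run. The resolve() helper below is hypothetical, only there to make the selection visible; Ambari performs it inside the decorator machinery:

import platform

class HbaseClient(object):
  def install(self):
    print("install packages, then configure")

class HbaseClientWindows(HbaseClient):
  pass  # inherited behavior is sufficient on Windows

class HbaseClientDefault(HbaseClient):
  def get_stack_to_component(self):
    return {"HDP": "hbase-client"}

def resolve(windows_cls, default_cls):
  # hypothetical stand-in for OsFamilyImpl's registry lookup
  return windows_cls if platform.system() == "Windows" else default_cls

if __name__ == "__main__":
  resolve(HbaseClientWindows, HbaseClientDefault)().install()
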
+ 22 - 3
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py

@@ -19,8 +19,30 @@ limitations under the License.
 """
 
 from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
 
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
 
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def hbase_decommission(env):
   import params
 
@@ -69,6 +91,3 @@ def hbase_decommission(env):
       pass
     pass
   pass
-  
-
-  pass

+ 38 - 21
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py

@@ -28,21 +28,49 @@ from hbase_service import hbase_service
 from hbase_decommission import hbase_decommission
 import upgrade
 from setup_ranger_hbase import setup_ranger_hbase
-         
-class HbaseMaster(Script):
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
 
-  def get_stack_to_component(self):
-    return {"HDP": "hbase-master"}
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
 
   def install(self, env):
     self.install_packages(env)
-    
-  def configure(self, env):
+
+  def decommission(self, env):
     import params
     env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_stack_to_component(self):
+    return {"HDP": "hbase-master"}
 
-    hbase(name='master')
-    
   def pre_rolling_restart(self, env):
     import params
     env.set_params(params)
@@ -53,17 +81,12 @@ class HbaseMaster(Script):
     env.set_params(params)
     self.configure(env) # for security
     setup_ranger_hbase()  
-    hbase_service( 'master',
-      action = 'start'
-    )
+    hbase_service('master', action = 'start')
     
   def stop(self, env, rolling_restart=False):
     import params
     env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
+    hbase_service('master', action = 'stop')
 
   def status(self, env):
     import status_params
@@ -120,12 +143,6 @@ class HbaseMaster(Script):
     else:
       self.put_structured_out({"securityState": "UNSECURED"})
 
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_decommission(env)
-
 
 if __name__ == "__main__":
   HbaseMaster().execute()

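Note: on Windows the master is managed as a registered service; start/stop go through the Service resource and status goes through check_windows_service_status from ambari_commons. As a rough, dependency-free approximation of such a status probe (an assumption for illustration, not Ambari's helper), the stock sc utility can be queried:

import subprocess

def windows_service_is_running(service_name):
  # `sc query <name>` prints a STATE line containing RUNNING when the
  # service is up; any other state (STOPPED, START_PENDING, ...) fails here
  output = subprocess.check_output(["sc", "query", service_name])
  return b"RUNNING" in output

if not windows_service_is_running("master"):  # service name from status_params
  raise Exception("HBase master service is not running")
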
+ 34 - 11
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py

@@ -27,22 +27,48 @@ from hbase import hbase
 from hbase_service import hbase_service
 import upgrade
 from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
 
-         
-class HbaseRegionServer(Script):
-
-  def get_stack_to_component(self):
-    return {"HDP": "hbase-regionserver"}
 
+class HbaseRegionServer(Script):
   def install(self, env):
     self.install_packages(env)
-    
+
   def configure(self, env):
     import params
     env.set_params(params)
-
     hbase(name='regionserver')
-      
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_stack_to_component(self):
+    return {"HDP": "hbase-regionserver"}
+
   def pre_rolling_restart(self, env):
     import params
     env.set_params(params)
@@ -125,9 +151,6 @@ class HbaseRegionServer(Script):
     else:
       self.put_structured_out({"securityState": "UNSECURED"})
 
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
 
 if __name__ == "__main__":
   HbaseRegionServer().execute()

+ 4 - 226
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py

@@ -17,231 +17,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from ambari_commons import OSCheck
 
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from functions import calc_xmn_from_xms
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-exec_tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-version = default("/commandParams/version", None)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
-  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
-  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
-  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
-  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
-else:
-  hadoop_bin_dir = "/usr/bin"
-  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-  hbase_cmd = "/usr/lib/hbase/bin/hbase"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hbase_conf_dir_prefix = "/etc/hbase"
-hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
-hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = default("/commandParams/mark_draining_only",False)
-hbase_included_hosts = config['commandParams']['included_hosts']
-
-hbase_user = status_params.hbase_user
-hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['hbase-env']['hbase_log_dir']
-master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
-regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
-regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
-regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hbase_max_direct_memory_size  = config['configurations']['hbase-env']['hbase_max_direct_memory_size']
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-# TODO UPGRADE default, update site during upgrade
-_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
-local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
-
-client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
-master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
-regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-has_metric_collector = not len(ams_collector_hosts) == 0
-if has_metric_collector:
-  metric_collector_host = ams_collector_hosts[0]
-  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-  if metric_collector_port and metric_collector_port.find(':') != -1:
-    metric_collector_port = metric_collector_port.split(':')[1]
-  pass
-
-# if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
-if 'slave_hosts' in config['clusterHostInfo']:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+if OSCheck.is_windows_family():
+  from params_windows import *
 else:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
-
-smoke_test_user = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-smokeuser_permissions = "RWXCA"
-service_check_data = functions.get_unique_id_and_date()
-user_group = config['configurations']['cluster-env']["user_group"]
-
-if security_enabled:
-  _hostname_lowercase = config['hostname'].lower()
-  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path()
-if security_enabled:
-  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
-else:
-  kinit_cmd = ""
-
-#log4j.properties
-if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
-  log4j_props = config['configurations']['hbase-log4j']['content']
-else:
-  log4j_props = None
-  
-hbase_env_sh_template = config['configurations']['hbase-env']['content']
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
-hbase_staging_dir = "/apps/hbase/staging"
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path()
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  command_role = default("/role", "")
-  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
-    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
-
-    daemon_script=format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
-    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
-    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
-    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  # Setting Flag value for ranger hbase plugin
-  enable_ranger_hbase = False
-  ranger_plugin_enable = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled","no")
-  if ranger_plugin_enable.lower() == 'yes':
-    enable_ranger_hbase = True
-  elif ranger_plugin_enable.lower() == 'no':
-    enable_ranger_hbase = False
-
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0    
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-
-# ranger hbase properties
-policymgr_mgr_url = default("/configurations/admin-properties/policymgr_external_url", "http://localhost:6080")
-sql_connector_jar = default("/configurations/admin-properties/SQL_CONNECTOR_JAR", "/usr/share/java/mysql-connector-java.jar")
-xa_audit_db_flavor = default("/configurations/admin-properties/DB_FLAVOR", "MYSQL")
-xa_audit_db_name = default("/configurations/admin-properties/audit_db_name", "ranger_audit")
-xa_audit_db_user = default("/configurations/admin-properties/audit_db_user", "rangerlogger")
-xa_audit_db_password = default("/configurations/admin-properties/audit_db_password", "rangerlogger")
-xa_db_host = default("/configurations/admin-properties/db_host", "localhost")
-repo_name = str(config['clusterName']) + '_hbase'
-db_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.DB.IS_ENABLED", "false")
-hdfs_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.IS_ENABLED", "false")
-hdfs_dest_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINATION_DIRECTORY", "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/app-type/time:yyyyMMdd")
-hdfs_buffer_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit")
-hdfs_archive_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit/archive")
-hdfs_dest_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FILE", "hostname-audit.log")
-hdfs_dest_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS", "900")
-hdfs_dest_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS", "86400")
-hdfs_dest_open_retry_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS", "60")
-hdfs_buffer_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FILE", "time:yyyyMMdd-HHmm.ss.log")
-hdfs_buffer_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS", "60")
-hdfs_buffer_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS", "600")
-hdfs_archive_max_file_count = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT", "10")
-ssl_keystore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-keystore.jks")
-ssl_keystore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_PASSWORD", "myKeyFilePassword")
-ssl_truststore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-truststore.jks")
-ssl_truststore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_PASSWORD", "changeit")
-grant_revoke = default("/configurations/ranger-hbase-plugin-properties/UPDATE_XAPOLICIES_ON_GRANT_REVOKE","true")
-common_name_for_certificate = default("/configurations/ranger-hbase-plugin-properties/common.name.for.certificate", "-")
-
-zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
-hbase_zookeeoer_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
-hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
-hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
-hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-
-repo_config_username = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_USERNAME", "hbase")
-repo_config_password = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_PASSWORD", "hbase")
-
-admin_uname = default("/configurations/ranger-env/admin_username", "admin")
-admin_password = default("/configurations/ranger-env/admin_password", "admin")
-admin_uname_password = format("{admin_uname}:{admin_password}")
-
-ambari_ranger_admin = default("/configurations/ranger-env/ranger_admin_username", "amb_ranger_admin")
-ambari_ranger_password = default("/configurations/ranger-env/ranger_admin_password", "ambari123")
-policy_user = default("/configurations/ranger-hbase-plugin-properties/policy_user", "ambari-qa")
-
-#For curl command in ranger plugin to get db connector
-jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-if xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'mysql':
-  jdbc_symlink_name = "mysql-jdbc-driver.jar"
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'oracle':
-  jdbc_jar_name = "ojdbc6.jar"
-  jdbc_symlink_name = "oracle-jdbc-driver.jar"
-
-downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}")
-
-driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+  from params_linux import *

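Note: params.py is now just an OS switch. Every script keeps doing "import params" and transparently receives either the Windows or the Linux name set. Below is a self-contained illustration of the pattern, with stub modules (names invented for the example) standing in for params_windows/params_linux so the snippet runs anywhere:

import platform
import sys
import types

# stub modules for the example; in Ambari these are real sibling files
_win = types.ModuleType("params_windows_stub")
_win.hbase_conf_dir = "C:\\hdp\\hbase\\conf"
_nix = types.ModuleType("params_linux_stub")
_nix.hbase_conf_dir = "/etc/hbase/conf"
sys.modules["params_windows_stub"] = _win
sys.modules["params_linux_stub"] = _nix

if platform.system() == "Windows":
  from params_windows_stub import *
else:
  from params_linux_stub import *

print(hbase_conf_dir)  # one OS-appropriate namespace, same import for callers
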
+ 247 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from functions import calc_xmn_from_xms
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
+  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
+else:
+  hadoop_bin_dir = "/usr/bin"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hbase_conf_dir_prefix = "/etc/hbase"
+hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hbase_max_direct_memory_size  = config['configurations']['hbase-env']['hbase_max_direct_memory_size']
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+# TODO UPGRADE default, update site during upgrade
+_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
+local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_port and metric_collector_port.find(':') != -1:
+    metric_collector_port = metric_collector_port.split(':')[1]
+  pass
+
+# if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path()
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+else:
+  kinit_cmd = ""
+
+#log4j.properties
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path()
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  command_role = default("/role", "")
+  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
+    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
+
+    daemon_script=format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
+    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
+    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
+    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  # Setting Flag value for ranger hbase plugin
+  enable_ranger_hbase = False
+  ranger_plugin_enable = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled","no")
+  if ranger_plugin_enable.lower() == 'yes':
+    enable_ranger_hbase = True
+  elif ranger_plugin_enable.lower() == 'no':
+    enable_ranger_hbase = False
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0    
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+
+# ranger hbase properties
+policymgr_mgr_url = default("/configurations/admin-properties/policymgr_external_url", "http://localhost:6080")
+sql_connector_jar = default("/configurations/admin-properties/SQL_CONNECTOR_JAR", "/usr/share/java/mysql-connector-java.jar")
+xa_audit_db_flavor = default("/configurations/admin-properties/DB_FLAVOR", "MYSQL")
+xa_audit_db_name = default("/configurations/admin-properties/audit_db_name", "ranger_audit")
+xa_audit_db_user = default("/configurations/admin-properties/audit_db_user", "rangerlogger")
+xa_audit_db_password = default("/configurations/admin-properties/audit_db_password", "rangerlogger")
+xa_db_host = default("/configurations/admin-properties/db_host", "localhost")
+repo_name = str(config['clusterName']) + '_hbase'
+db_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.DB.IS_ENABLED", "false")
+hdfs_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.IS_ENABLED", "false")
+hdfs_dest_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINATION_DIRECTORY", "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/app-type/time:yyyyMMdd")
+hdfs_buffer_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit")
+hdfs_archive_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit/archive")
+hdfs_dest_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FILE", "hostname-audit.log")
+hdfs_dest_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS", "900")
+hdfs_dest_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS", "86400")
+hdfs_dest_open_retry_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS", "60")
+hdfs_buffer_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FILE", "time:yyyyMMdd-HHmm.ss.log")
+hdfs_buffer_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS", "60")
+hdfs_buffer_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS", "600")
+hdfs_archive_max_file_count = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT", "10")
+ssl_keystore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-keystore.jks")
+ssl_keystore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_PASSWORD", "myKeyFilePassword")
+ssl_truststore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-truststore.jks")
+ssl_truststore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_PASSWORD", "changeit")
+grant_revoke = default("/configurations/ranger-hbase-plugin-properties/UPDATE_XAPOLICIES_ON_GRANT_REVOKE","true")
+common_name_for_certificate = default("/configurations/ranger-hbase-plugin-properties/common.name.for.certificate", "-")
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeoer_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+repo_config_username = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_USERNAME", "hbase")
+repo_config_password = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_PASSWORD", "hbase")
+
+admin_uname = default("/configurations/ranger-env/admin_username", "admin")
+admin_password = default("/configurations/ranger-env/admin_password", "admin")
+admin_uname_password = format("{admin_uname}:{admin_password}")
+
+ambari_ranger_admin = default("/configurations/ranger-env/ranger_admin_username", "amb_ranger_admin")
+ambari_ranger_password = default("/configurations/ranger-env/ranger_admin_password", "ambari123")
+policy_user = default("/configurations/ranger-hbase-plugin-properties/policy_user", "ambari-qa")
+
+#For curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+if xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'mysql':
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'oracle':
+  jdbc_jar_name = "ojdbc6.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+
+downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}")
+
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")

+ 1 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py → ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py

@@ -20,6 +20,7 @@ limitations under the License.
 
 from resource_management import *
 import os
+import status_params
 
 # server configurations
 config = Script.get_config()

+ 18 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py

@@ -19,10 +19,28 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.libraries.functions.format import format
 import functions
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
 
 
 class HbaseServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseServiceCheckWindows(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseServiceCheckDefault(HbaseServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)

+ 14 - 9
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py

@@ -19,18 +19,23 @@ limitations under the License.
 """
 
 from resource_management import *
+from ambari_commons import OSCheck
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
+if OSCheck.is_windows_family():
+  hbase_master_win_service_name = "master"
+  hbase_regionserver_win_service_name = "regionserver"
+else:
+  pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+  hbase_user = config['configurations']['hbase-env']['hbase_user']
 
-# Security related/required params
-hostname = config['hostname']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path()
-tmp_dir = Script.get_tmp_dir()
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = functions.get_kinit_path()
+  tmp_dir = Script.get_tmp_dir()
 
 
-hbase_conf_dir_prefix = "/etc/hbase"
-hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
+  hbase_conf_dir_prefix = "/etc/hbase"
+  hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")

+ 0 - 164
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb

@@ -1,164 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Add or remove servers from draining mode via zookeeper
-
-require 'optparse'
-include Java
-
-import org.apache.hadoop.hbase.HBaseConfiguration
-import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.zookeeper.ZKUtil
-import org.apache.commons.logging.Log
-import org.apache.commons.logging.LogFactory
-
-# Name of this script
-NAME = "draining_servers"
-
-# Do command-line parsing
-options = {}
-optparse = OptionParser.new do |opts|
-  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
-  opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' +
-                 'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given separated by space'
-  opts.on('-h', '--help', 'Display usage information') do
-    puts opts
-    exit
-  end
-  options[:debug] = false
-  opts.on('-d', '--debug', 'Display extra debug logging') do
-    options[:debug] = true
-  end
-end
-optparse.parse!
-
-# Return array of servernames where servername is hostname+port+startcode
-# comma-delimited
-def getServers(admin)
-  serverInfos = admin.getClusterStatus().getServerInfo()
-  servers = []
-  for server in serverInfos
-    servers << server.getServerName()
-  end
-  return servers
-end
-
-def getServerNames(hostOrServers, config)
-  ret = []
-
-  for hostOrServer in hostOrServers
-    # check whether it is already serverName. No need to connect to cluster
-    parts = hostOrServer.split(',')
-    if parts.size() == 3
-      ret << hostOrServer
-    else
-      admin = HBaseAdmin.new(config) if not admin
-      servers = getServers(admin)
-
-      hostOrServer = hostOrServer.gsub(/:/, ",")
-      for server in servers
-        ret << server if server.start_with?(hostOrServer)
-      end
-    end
-  end
-
-  admin.close() if admin
-  return ret
-end
-
-def addServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.createAndFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-def removeServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.deleteNodeFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-# list servers in draining mode
-def listServers(options)
-  config = HBaseConfiguration.create()
-
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-
-  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
-  servers.each {|server| puts server}
-end
-
-hostOrServers = ARGV[1..ARGV.size()]
-
-# Create a logger and disable the DEBUG-level annoying client logging
-def configureLogging(options)
-  apacheLogger = LogFactory.getLog(NAME)
-  # Configure log4j to not spew so much
-  unless (options[:debug])
-    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-  end
-  return apacheLogger
-end
-
-# Create a logger and save it to ruby global
-$LOG = configureLogging(options)
-case ARGV[0]
-  when 'add'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    addServers(options, hostOrServers)
-  when 'remove'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    removeServers(options, hostOrServers)
-  when 'list'
-    listServers(options)
-  else
-    puts optparse
-    exit 3
-end

+ 0 - 30
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py

@@ -1,30 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def hbase():
-  import params
-  XmlConfig("hbase-site.xml",
-             conf_dir = params.hbase_conf_dir,
-             configurations = params.config['configurations']['hbase-site'],
-             configuration_attributes=params.config['configuration_attributes']['hbase-site']
-  )

+ 0 - 36
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py

@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-class HbaseClient(Script):
-  def install(self, env):
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HbaseClient().execute()

+ 0 - 66
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py

@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hbase_decomission():
-  import params
-
-  File(params.region_drainer,
-       content=StaticFile("draining_servers.rb"),
-       owner=params.hbase_user,
-       mode="f"
-  )
-  if params.hbase_drain_only == True:
-    hosts = params.hbase_excluded_hosts.split(",")
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-        pass
-    pass
-
-  else:
-
-    hosts = params.hbase_excluded_hosts.split(",")
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
-        regionmover_cmd = format(
-          "cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
-
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-
-        Execute(regionmover_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-      pass
-    pass
-  pass

+ 0 - 52
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py

@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hbase import hbase
-import service_mapping
-from hbase_decommission import *
-
-class HbaseMaster(Script):
-  def install(self, env):
-    if not check_windows_service_exists(service_mapping.hbase_master_win_service_name):
-      self.install_packages(env)
-
-  def configure(self, env):
-    hbase()
-
-  def start(self, env):
-    import params
-    self.configure(env) # for security
-    Service(service_mapping.hbase_master_win_service_name, action="start")
-
-  def stop(self, env):
-    Service(service_mapping.hbase_master_win_service_name, action="stop")
-
-  def status(self, env):
-    check_windows_service_status(service_mapping.hbase_master_win_service_name)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-    hbase_decomission()
-
-if __name__ == "__main__":
-  HbaseMaster().execute()

+ 0 - 49
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py

@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hbase import hbase
-import service_mapping
-
-class HbaseRegionServer(Script):
-  def install(self, env):
-    if not check_windows_service_exists(service_mapping.hbase_regionserver_win_service_name):
-      self.install_packages(env)
-
-  def configure(self, env):
-    hbase()
-
-  def start(self, env):
-    import params
-    self.configure(env)
-    Service(service_mapping.hbase_regionserver_win_service_name, action="start")
-
-  def stop(self, env):
-    Service(service_mapping.hbase_regionserver_win_service_name, action="stop")
-
-  def status(self, env):
-    check_windows_service_status(service_mapping.hbase_regionserver_win_service_name)
-
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

+ 0 - 34
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py

@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.format import format
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
-    service = "HBASE"
-    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
-
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()

+ 0 - 21
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py

@@ -1,21 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-hbase_master_win_service_name = "master"
-hbase_regionserver_win_service_name = "regionserver"