AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

Jun Aoki, 10 years ago
parent
commit
32b1fc38e3
100 changed files with 1470 additions and 1627 deletions
  1. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
  2. 15 10
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
  3. 5 19
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
  4. 0 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh
  5. 2 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py
  6. 103 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
  7. 56 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py
  8. 0 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py
  9. 5 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
  10. 5 36
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
  11. 4 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh
  12. 0 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py
  13. 18 6
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
  14. 7 5
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
  15. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml
  16. 1 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
  17. 38 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
  18. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml
  19. 7 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py
  20. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py
  21. 11 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py
  22. 5 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
  23. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh
  24. 2 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
  25. 18 13
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py
  26. 0 39
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
  27. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml
  28. 2 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
  29. 31 31
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py
  30. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py
  31. 24 9
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py
  32. 3 3
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py
  33. 6 4
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
  34. 10 84
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
  35. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml
  36. 3 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh
  37. 9 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py
  38. 18 9
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py
  39. 9 6
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
  40. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py
  41. 33 11
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
  42. 22 13
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py
  43. 88 5
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
  44. 57 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml
  45. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml
  46. 248 36
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
  47. 2 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml
  48. 0 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml
  49. 79 74
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml
  50. 5 5
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh
  51. 12 9
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py
  52. 6 4
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py
  53. 19 16
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
  54. 15 19
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
  55. 23 14
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
  56. 88 20
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
  57. 6 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
  58. 2 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
  59. 6 7
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
  60. 1 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
  61. 40 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py
  62. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
  63. 2 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py
  64. 14 14
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py
  65. 0 43
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2
  66. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/startHiveserver2.sh.j2
  67. 0 66
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
  68. 0 66
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py
  69. 0 36
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout_client.py
  70. 0 55
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/params.py
  71. 0 92
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/service_check.py
  72. 0 34
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/templates/mahout-env.sh.j2
  73. 2 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/configuration/oozie-env.xml
  74. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/configuration/oozie-log4j.xml
  75. 1 13
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/metainfo.xml
  76. 19 14
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/files/oozieSmoke2.sh
  77. 52 22
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie.py
  78. 2 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_client.py
  79. 3 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_server.py
  80. 11 12
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_service.py
  81. 67 21
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/params.py
  82. 3 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/service_check.py
  83. 81 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/templates/catalina.properties.j2
  84. 1 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2
  85. 14 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/params.py
  86. 1 0
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/pig.py
  87. 6 4
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/service_check.py
  88. 0 54
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/configuration/sqoop-env.xml
  89. 0 92
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/metainfo.xml
  90. 0 19
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/__init__.py
  91. 0 37
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/params.py
  92. 0 37
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/service_check.py
  93. 0 57
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/sqoop.py
  94. 0 107
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/metainfo.xml
  95. 0 20
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py
  96. 0 83
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py
  97. 0 45
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py
  98. 0 26
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py
  99. 0 6
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml
  100. 9 1
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py

@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_hadoop_env()
+    setup_hdp_install_directory()
     setup_config()
 
 if __name__ == "__main__":

+ 15 - 10
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py

@@ -19,24 +19,31 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+versioned_hdp_root = '/usr/bigtop/current'
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params
 java_home = config['hostLevelParams']['java_home']
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
 
 if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
   # deprecated rhel jsvc_path
@@ -58,8 +65,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #users and groups
@@ -67,4 +72,4 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
 namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
+has_namenode = not len(namenode_host) == 0

+ 5 - 19
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -19,25 +19,11 @@ limitations under the License.
 import os
 from resource_management import *
 
-def setup_hadoop_env():
+def setup_hdp_install_directory():
   import params
-  if params.has_namenode:
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-    Directory(params.hadoop_conf_empty_dir,
-              recursive=True,
-              owner='root',
-              group='root'
-    )
-    Link(params.hadoop_conf_dir,
-         to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}")
-    )
-    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-         owner=tc_owner,
-         content=InlineTemplate(params.hadoop_env_sh_template)
+  if params.rpm_version:
+    Execute(format('ambari-python-wrap /usr/bin/bigtop-select set all `ambari-python-wrap /usr/bin/bigtop-select versions | grep ^{rpm_version}- | tail -1`'),
+            only_if=format('ls -d /usr/bigtop/{rpm_version}-*')
     )
 
 def setup_config():
@@ -49,4 +35,4 @@ def setup_config():
               configuration_attributes=params.config['configuration_attributes']['core-site'],
               owner=params.hdfs_user,
               group=params.user_group
-  )
+    )

+ 0 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/files/changeToSecureUid.sh → ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/files/changeToSecureUid.sh


+ 2 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/hook.py

@@ -27,6 +27,8 @@ class BeforeAnyHook(Hook):
     env.set_params(params)
     
     setup_jce()
+    setup_users()
+    setup_hadoop_env()
 
 if __name__ == "__main__":
   BeforeAnyHook().execute()

+ 103 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py

@@ -18,12 +18,12 @@ limitations under the License.
 """
 
 from resource_management import *
+import collections
+import json
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
@@ -31,3 +31,104 @@ jdk_name = default("/hostLevelParams/jdk_name", None)
 java_home = config['hostLevelParams']['java_home']
 
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_home = "/usr/bigtop/current/hadoop-client"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_home = "/usr/lib/hadoop"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+versioned_hdp_root = '/usr/bigtop/current'
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
+  # deprecated rhel jsvc_path
+  jsvc_path = "/usr/libexec/bigtop-utils"
+else:
+  jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+
+has_namenode = not len(namenode_host) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+if has_nagios:
+  user_to_gid_dict[nagios_user] = nagios_group
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
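
Side note on the user/group wiring added above: `user_to_groups_dict` and `user_to_gid_dict` are `collections.defaultdict` objects, so any user without an explicit entry silently falls back to the cluster `user_group`. A minimal standalone illustration of that fallback (the user and group names below are hypothetical, not taken from the stack definition):

```python
import collections

# Hypothetical values standing in for the cluster-env / hadoop-env settings.
user_group = "hadoop"
proxyuser_group = "users"

# Any user without an explicit entry falls back to [user_group].
user_to_groups_dict = collections.defaultdict(lambda: [user_group])
user_to_groups_dict["ambari-qa"] = [proxyuser_group]  # e.g. the smoke user

print(user_to_groups_dict["ambari-qa"])  # ['users']   (explicit override)
print(user_to_groups_dict["hdfs"])       # ['hadoop']  (defaultdict fallback)
```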

+ 56 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/shared_initialization.py

@@ -56,3 +56,59 @@ def setup_jce():
             cwd  = security_dir,
             path = ['/bin/','/usr/bin']
     )
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+  
+  for group in params.group_list:
+    Group(group,
+        ignore_failures = params.ignore_groupsusers_create
+    )
+    
+  for user in params.user_list:
+    User(user,
+        gid = params.user_to_gid_dict[user],
+        groups = params.user_to_groups_dict[user],
+        ignore_failures = params.ignore_groupsusers_create       
+    )
+           
+  set_uid(params.smoke_user, params.smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    set_uid(params.hbase_user, params.hbase_user_dirs)
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+    
+def setup_hadoop_env():
+  import params
+  if params.has_namenode:
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+    Directory(params.hadoop_conf_empty_dir,
+              recursive=True,
+              owner='root',
+              group='root'
+    )
+    Link(params.hadoop_conf_dir,
+         to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}")
+    )
+    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
+         owner=tc_owner,
+         content=InlineTemplate(params.hadoop_env_sh_template)
+    )

+ 0 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/hook.py

@@ -33,7 +33,6 @@ class BeforeInstallHook(Hook):
     install_repos()
     install_packages()
     setup_java()
-    setup_users()
 
 if __name__ == "__main__":
   BeforeInstallHook().execute()

+ 5 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py

@@ -19,13 +19,15 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 import json
 import collections
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
@@ -36,6 +38,8 @@ tez_user = config['configurations']['tez-env']["tez_user"]
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
 #hosts
 hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

+ 5 - 36
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -21,41 +21,6 @@ import os
 
 from resource_management import *
 
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-  
-  for group in params.group_list:
-    Group(group,
-        ignore_failures = params.ignore_groupsusers_create
-    )
-    
-  for user in params.user_list:
-    User(user,
-        gid = params.user_to_gid_dict[user],
-        groups = params.user_to_groups_dict[user],
-        ignore_failures = params.ignore_groupsusers_create       
-    )
-           
-  set_uid(params.smoke_user, params.smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    set_uid(params.hbase_user, params.hbase_user_dirs)
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-  
 def setup_java():
   """
   Installs jdk using specific params, that comes from ambari-server
@@ -91,4 +56,8 @@ def setup_java():
   )
 
 def install_packages():
-  Package(['unzip', 'curl'])
+  import params
+  packages = ['unzip', 'curl']
+  if params.rpm_version:
+    packages.append('bigtop-select')
+  Package(packages)

+ 4 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/files/checkForFormat.sh

@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export mark_dir=$1
 shift
 export name_dirs=$*
@@ -50,7 +52,8 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    export PATH=$PATH:$bin_dir
+    su -s /bin/bash - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
   fi

+ 0 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/hook.py

@@ -27,7 +27,6 @@ class BeforeStartHook(Hook):
     import params
 
     self.run_custom_hook('before-ANY')
-    self.run_custom_hook('after-INSTALL')
     env.set_params(params)
 
     setup_hadoop()

+ 18 - 6
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py

@@ -23,6 +23,24 @@ import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_lib_home = "/usr/bigtop/current/hadoop-client/lib"
+  hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
+  hadoop_home = '/usr/bigtop/current/hadoop-client'
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_home = '/usr'
+
+hadoop_conf_dir = "/etc/hadoop/conf"
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
@@ -70,11 +88,7 @@ if has_ganglia_server:
 
 if has_namenode:
   hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 
@@ -127,8 +141,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #log4j.properties

+ 7 - 5
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py

@@ -38,7 +38,8 @@ def setup_hadoop():
     Directory(params.hdfs_log_dir_prefix,
               recursive=True,
               owner='root',
-              group='root'
+              group=params.user_group,
+              mode=0775
     )
     Directory(params.hadoop_pid_dir_prefix,
               recursive=True,
@@ -162,10 +163,11 @@ def install_snappy():
   so_src_dir_x64 = format("{hadoop_home}/lib64")
   so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
   so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
+  if params.has_namenode:
+    Execute(
+      format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+    Execute(
+      format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
 
 
 def create_javahome_symlink():

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/repos/repoinfo.xml

@@ -21,7 +21,7 @@
   -->
   <os family="redhat6">
     <repo>
-      <baseurl>http://bigtop01.cloudera.org:8080/job/Bigtop-trunk-Repository/label=centos6/lastSuccessfulBuild/artifact/repo/</baseurl>
+      <baseurl>http://bigtop.s3.amazonaws.com/releases/0.8.0/redhat/6/x86_64</baseurl>
       <repoid>BIGTOP-0.8</repoid>
       <reponame>BIGTOP</reponame>
     </repo>

+ 1 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json

@@ -13,9 +13,8 @@
     "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],

+ 38 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml

@@ -37,4 +37,42 @@
     <property-type>USER</property-type>
     <description>Flume User</description>
   </property>
+
+  <!-- flume-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for flume-env.sh file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Enviroment variables can be set here.
+
+export JAVA_HOME={{java_home}}
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
+
+# Note that the Flume conf directory is always included in the classpath.
+#FLUME_CLASSPATH=""
+
+# export HIVE_HOME=/usr/lib/hive
+# export HCAT_HOME=/usr/lib/hive-hcatalog
+    </value>
+  </property>
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/metainfo.xml

@@ -22,7 +22,7 @@
       <name>FLUME</name>
       <displayName>Flume</displayName>
       <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
-      <version>1.4.0.2.0</version>
+      <version>1.5.0.1.671</version>
       <components>
         <component>
           <name>FLUME_HANDLER</name>

+ 7 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume.py

@@ -30,9 +30,14 @@ def flume(action = None):
     for n in find_expected_agent_names():
       os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))
 
-    Directory(params.flume_conf_dir)
+    Directory(params.flume_conf_dir, recursive=True)
     Directory(params.flume_log_dir, owner=params.flume_user)
 
+    File(format("{flume_conf_dir}/flume-env.sh"),
+         owner=params.flume_user,
+         content=InlineTemplate(params.flume_env_sh_template)
+    )
+
     flume_agents = {}
     if params.flume_conf_content is not None:
       flume_agents = build_flume_topology(params.flume_conf_content)
@@ -63,7 +68,7 @@ def flume(action = None):
       _set_desired_state('STARTED')
       
     flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
-      '/usr/bin/flume-ng agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
+      '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
 
     for agent in cmd_target_names():
       flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/flume_check.py

@@ -31,7 +31,7 @@ class FlumeServiceCheck(Script):
       Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
               user=params.smoke_user)
 
-    Execute(format('env JAVA_HOME={java_home} /usr/bin/flume-ng version'),
+    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
             logoutput=True,
             tries = 3,
             try_sleep = 20)

+ 11 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/package/scripts/params.py

@@ -26,9 +26,17 @@ proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 security_enabled = False
 
-java_home = config['hostLevelParams']['java_home']
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  flume_bin = '/usr/bigtop/current/flume-client/bin/flume-ng'
+else:
+  flume_bin = '/usr/bin/flume-ng'
 
 flume_conf_dir = '/etc/flume/conf'
+java_home = config['hostLevelParams']['java_home']
 flume_log_dir = '/var/log/flume'
 flume_run_dir = '/var/run/flume'
 flume_user = 'flume'
@@ -50,6 +58,8 @@ else:
 targets = default('/commandParams/flume_handler', None)
 flume_command_targets = [] if targets is None else targets.split(',')
 
+flume_env_sh_template = config['configurations']['flume-env']['content']
+
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
 ganglia_server_host = None
 if 0 != len(ganglia_server_hosts):

+ 5 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml

@@ -68,5 +68,10 @@
     <value>4</value>
     <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
   </property>
+  <property>
+    <name>additional_clusters</name>
+    <value> </value>
+    <description>Add additional desired Ganglia metrics cluster in the form "name1:port1,name2:port2". Ensure that the names and ports are unique across all cluster and ports are available on ganglia server host. Ambari has reserved ports 8667-8669 within its own pool.</description>
+  </property>
 
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/files/startRrdcached.sh

@@ -31,7 +31,7 @@ rrdcachedRunningPid=`getRrdcachedRunningPid`;
 # Only attempt to start rrdcached if there's not already one running.
 if [ -z "${rrdcachedRunningPid}" ]
 then
-    su - ${GMETAD_USER} -s /bin/bash -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    su -s /bin/bash - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
              -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
              -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
              -b ${RRDCACHED_BASE_DIR} -B -t ${RRDCACHED_WRITE_THREADS} \

+ 2 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/ganglia_monitor.py

@@ -110,12 +110,12 @@ class GangliaMonitor(Script):
 
     for gmond_app in params.gmond_apps:
       generate_daemon("gmond",
-                      name=gmond_app,
+                      name=gmond_app[0],
                       role="server",
                       owner="root",
                       group=params.user_group)
       generate_daemon("gmond",
-                      name = gmond_app,
+                      name = gmond_app[0],
                       role = "monitor",
                       owner = "root",
                       group = params.user_group)

+ 18 - 13
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/package/scripts/params.py

@@ -31,11 +31,16 @@ ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
 gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
 gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
-gmond_app_str = default("/configurations/hadoop-env/enabled_app_servers", None)
-gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
-gmond_apps = [x.strip() for x in gmond_apps]
-gmond_allowed_apps = ["Application1", "Application2", "Application3"]
-gmond_apps = set(gmond_apps) & set(gmond_allowed_apps)
+gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
+if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
+  gmond_add_clusters_str = None
+
+gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(',')
+gmond_apps = []
+
+for x in gmond_app_strs:
+  a,b = x.strip().split(':')
+  gmond_apps.append((a.strip(),b.strip()))
 
 if System.get_instance().os_family == "ubuntu":
   gmond_service_name = "ganglia-monitor"
@@ -103,12 +108,12 @@ has_nimbus_server = not len(nimbus_server_hosts) == 0
 has_supervisor_server = not len(supervisor_server_hosts) == 0
 
 ganglia_cluster_names = {
-  "jtnode_host": [("HDPJournalNode", 8654)],
+  "jn_hosts": [("HDPJournalNode", 8654)],
   "flume_hosts": [("HDPFlumeServer", 8655)],
   "hbase_rs_hosts": [("HDPHBaseRegionServer", 8656)],
   "nm_hosts": [("HDPNodeManager", 8657)],
   "mapred_tt_hosts": [("HDPTaskTracker", 8658)],
-  "slave_hosts": [("HDPDataNode", 8659), ("HDPSlaves", 8660)],
+  "slave_hosts": [("HDPDataNode", 8659)],
   "namenode_host": [("HDPNameNode", 8661)],
   "jtnode_host": [("HDPJobTracker", 8662)],
   "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
@@ -116,12 +121,12 @@ ganglia_cluster_names = {
   "hs_host": [("HDPHistoryServer", 8666)],
   "nimbus_hosts": [("HDPNimbus", 8649)],
   "supervisor_hosts": [("HDPSupervisor", 8650)],
-  "Application1": [("Application1", 8667)],
-  "Application2": [("Application2", 8668)],
-  "Application3": [("Application3", 8669)]
+  "ReservedPort1": [("ReservedPort1", 8667)],
+  "ReservedPort2": [("ReservedPort2", 8668)],
+  "ReservedPort3": [("ReservedPort3", 8669)]
 }
 
-ganglia_clusters = []
+ganglia_clusters = [("HDPSlaves", 8660)]
 
 for key in ganglia_cluster_names:
   property_name = format("/clusterHostInfo/{key}")
@@ -129,10 +134,10 @@ for key in ganglia_cluster_names:
   if not len(hosts) == 0:
     for x in ganglia_cluster_names[key]:
       ganglia_clusters.append(x)
+
 if len(gmond_apps) > 0:
   for gmond_app in gmond_apps:
-    for x in ganglia_cluster_names[gmond_app]:
-      ganglia_clusters.append(x)
+    ganglia_clusters.append(gmond_app)
 
 ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
 ganglia_web_path="/var/www/html/ganglia"
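
To make the new `additional_clusters` handling above concrete: a value in the documented `"name1:port1,name2:port2"` form is split into `(name, port)` tuples that are later appended to `ganglia_clusters`. A standalone sketch with hypothetical cluster names:

```python
# Standalone sketch of the parsing added above; the cluster names and ports
# here are hypothetical examples of the "name1:port1,name2:port2" format.
gmond_add_clusters_str = "AppCluster1:8670, AppCluster2:8671"

gmond_apps = []
if gmond_add_clusters_str and not gmond_add_clusters_str.isspace():
    for x in gmond_add_clusters_str.split(','):
        name, port = x.strip().split(':')
        gmond_apps.append((name.strip(), port.strip()))

print(gmond_apps)  # [('AppCluster1', '8670'), ('AppCluster2', '8671')]
```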

+ 0 - 39
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml

@@ -222,45 +222,6 @@
     </description>
   </property>
 
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value>/etc/security/keytabs/hbase.service.keytab</value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value>hbase/_HOST@EXAMPLE.COM</value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value>/etc/security/keytabs/hbase.service.keytab</value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value>hbase/_HOST@EXAMPLE.COM</value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
   <!-- Additional configuration specific to HBase security -->
   <property>
     <name>hbase.superuser</name>

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/metainfo.xml

@@ -24,7 +24,7 @@
       <comment>Non-relational distributed database and centralized service for configuration management &amp;
         synchronization
       </comment>
-      <version>0.98.2.686</version>
+      <version>0.98.2.687</version>
       <components>
         <component>
           <name>HBASE_MASTER</name>

+ 2 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/files/hbaseSmokeVerify.sh

@@ -21,7 +21,8 @@
 #
 conf_dir=$1
 data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
 cat /tmp/hbase_chk_verify
 echo "Looking for $data"
 grep -q $data /tmp/hbase_chk_verify

+ 31 - 31
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_decommission.py

@@ -33,42 +33,42 @@ def hbase_decommission(env):
   )
   
   if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
 
-    if params.hbase_drain_only == 'true':
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-          pass
-      pass
-
-    else:
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
 
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
-          regionmover_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
 
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
 
-          Execute(regionmover_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-        pass
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
       pass
     pass
-
+  pass
+  
 
   pass

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/hbase_service.py

@@ -44,7 +44,7 @@ def hbase_service(
       Execute ( daemon_cmd,
         user = params.hbase_user,
         # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
-        timeout = 30,
+        timeout = 60,
         on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
       )
       

+ 24 - 9
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/params.py

@@ -26,16 +26,31 @@ import status_params
 config = Script.get_config()
 exec_tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = format("/usr/bigtop/current/hadoop-client/bin")
+  daemon_script = format('/usr/bigtop/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('/usr/bigtop/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('/usr/bigtop/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/bigtop/current/hbase-client/bin/hbase')
+else:
+  hadoop_bin_dir = "/usr/bin"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
 hbase_conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = config['commandParams']['mark_draining_only']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -72,7 +87,7 @@ if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
-  
+
 smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
@@ -89,7 +104,7 @@ smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
-  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
 else:
   kinit_cmd = ""
 
@@ -105,7 +120,6 @@ hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -119,5 +133,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 3 - 3
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/package/scripts/service_check.py

@@ -44,7 +44,7 @@ class HbaseServiceCheck(Script):
     
     if params.security_enabled:    
       hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
-      grantprivelegecmd = format("{kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
   
       File( hbase_grant_premissions_file,
         owner   = params.hbase_user,
@@ -57,8 +57,8 @@ class HbaseServiceCheck(Script):
         user = params.hbase_user,
       )
 
-    servicecheckcmd = format("{smokeuser_kinit_cmd} hbase --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data}")
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
   
     Execute( servicecheckcmd,
       tries     = 3,

+ 6 - 4
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml

@@ -83,7 +83,12 @@
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
-  
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
   <!-- hadoop-env.sh -->
   <property>
     <name>content</name>
@@ -199,9 +204,6 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
 #Mostly required for hadoop 2.0
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-
-#Hadoop logging options
-export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
     </value>
   </property>
   

+ 10 - 84
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml

@@ -297,40 +297,6 @@
     </description>
   </property>
 
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
-
-  </property>
-
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
@@ -338,56 +304,6 @@
     <description>Address of secondary namenode web server</description>
   </property>
 
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPENGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@EXAMPLE.COM</value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/security/keytabs/nn.service.keytab</value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/security/keytabs/nn.service.keytab</value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/security/keytabs/dn.service.keytab</value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
 
   <property>
     <name>dfs.namenode.https-address</name>
@@ -501,4 +417,14 @@
       When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
   </property>
 
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS(SSL) is supported on HDFS This configures the HTTP endpoint for HDFS daemons:
+      The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY :
+      Service is provided only on https - HTTP_AND_HTTPS : Service is provided both on http and https
+    </description>
+  </property>
+
 </configuration>
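
A note on how this new property is consumed: the datanode start-up logic added to utils.py later in this commit reads dfs.http.policy to decide which datanode ports must be privileged before it falls back to starting the process as root. A minimal, hypothetical Python sketch of that decision, using plain values instead of Ambari's params object:

# Hypothetical sketch: which datanode ports need a privileged-port check for a given dfs.http.policy.
# Mirrors the intent of the utils.py hunk further down; not the actual Ambari code.
def ports_to_check(policy, dn_port, http_port, https_port):
    if policy == "HTTPS_ONLY":
        return [dn_port, https_port]
    if policy == "HTTP_AND_HTTPS":
        return [dn_port, http_port, https_port]
    # HTTP_ONLY, or the property is unset
    return [dn_port, http_port]

print(ports_to_check("HTTP_ONLY", 1019, 1022, 50475))  # -> [1019, 1022]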

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/metainfo.xml

@@ -22,7 +22,7 @@
       <name>HDFS</name>
       <displayName>HDFS</displayName>
       <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.4.0.724</version>
+      <version>2.4.1.726</version>
 
       <components>
         <component>

+ 3 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/files/checkForFormat.sh

@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export old_mark_dir=$1
 shift
 export mark_dir=$1
@@ -56,7 +58,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
   fi
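
With bin_dir inserted as the third positional argument, callers now have to pass: hdfs_user, conf_dir, bin_dir, old_mark_dir, mark_dir, then one or more name dirs (the matching Python-side change is in hdfs_namenode.py below). A rough sketch of the resulting command line, with example paths only:

# Illustrative argument order for checkForFormat.sh after this change; all paths are examples.
args = [
    "hdfs",                                       # hdfs_user
    "/etc/hadoop/conf",                           # conf_dir
    "/usr/bin",                                   # bin_dir (the new third argument)
    "/var/run/hadoop/hdfs/namenode/formatted/",   # old_mark_dir (example)
    "/var/lib/hdfs/namenode/formatted/",          # mark_dir
    "/hadoop/hdfs/namenode",                      # dfs_name_dir (one or more)
]
print("checkForFormat.sh " + " ".join(args))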

+ 9 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs.py

@@ -65,6 +65,15 @@ def hdfs(name=None):
             group=params.user_group
   )
 
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
   File(os.path.join(params.hadoop_conf_dir, 'slaves'),
        owner=tc_owner,
        content=Template("slaves.j2")
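
XmlConfig here writes core-site.xml from the configurations dictionary delivered with the command, owned by the HDFS user and world-readable. A simplified, standalone sketch of what such a renderer produces (this is not the resource_management implementation):

# Simplified stand-in for an XmlConfig-style renderer: dict of properties in, *-site.xml text out.
def render_site_xml(props):
    lines = ["<configuration>"]
    for name, value in sorted(props.items()):
        lines.append("  <property>")
        lines.append("    <name>%s</name>" % name)
        lines.append("    <value>%s</value>" % value)
        lines.append("  </property>")
    lines.append("</configuration>")
    return "\n".join(lines)

print(render_site_xml({"fs.defaultFS": "hdfs://nn.example.com:8020"}))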

+ 18 - 9
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_datanode.py

@@ -18,25 +18,34 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
 from utils import service
 
+
+def create_dirs(data_dir, params):
+  """
+  :param data_dir: The directory to create
+  :param params: parameters
+  """
+  Directory(data_dir,
+            recursive=True,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+
 def datanode(action=None):
   import params
-
   if action == "configure":
     Directory(params.dfs_domain_socket_dir,
               recursive=True,
               mode=0751,
               owner=params.hdfs_user,
               group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(data_dir,
-                recursive=True,
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group,
-                ignore_failures=True
-      )
+
+    handle_dfs_data_dir(create_dirs, params)
 
   elif action == "start" or action == "stop":
     service(
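
handle_dfs_data_dir keeps the per-directory callback (create_dirs above) but adds mount awareness: directories recorded in the file named by dfs.datanode.data.dir.mount.file are skipped when their disk is no longer mounted, instead of being recreated on the root volume. A conceptual, standalone sketch of that contract (the real helper in resource_management does more bookkeeping):

from __future__ import print_function

# Conceptual sketch only; the real helper is
# resource_management.libraries.functions.dfs_datanode_helper.handle_dfs_data_dir.
def handle_data_dirs(create_dir, data_dirs, previously_mounted, is_mounted):
    for d in [x.strip() for x in data_dirs.split(",") if x.strip()]:
        if d in previously_mounted and not is_mounted(d):
            # The disk that used to back this directory is gone; skip it instead of
            # silently recreating the directory on the root volume.
            print("skipping %s: expected mount is missing" % d)
            continue
        create_dir(d)

def create_dir(path):
    print("would create %s (0755, hdfs:hadoop)" % path)

handle_data_dirs(create_dir,
                 "/grid/0/hdfs/data,/grid/1/hdfs/data",
                 {"/grid/1/hdfs/data": "/grid/1"},
                 lambda d: False)  # pretend no mounts are present, to show the skip path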

+ 9 - 6
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py

@@ -45,11 +45,11 @@ def namenode(action=None, do_format=True):
       create_log_dir=True
     )
     if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
+      dfs_check_nn_status_cmd = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
     else:
       dfs_check_nn_status_cmd = None
 
-    namenode_safe_mode_off = format("su - {hdfs_user} -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+    namenode_safe_mode_off = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
 
     if params.security_enabled:
       Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
@@ -110,14 +110,16 @@ def format_namenode(force=None):
   if not params.dfs_ha_enabled:
     if force:
       ExecuteHadoop('namenode -format',
-                    kinit_override=True)
+                    kinit_override=True,
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
     else:
       File(format("{tmp_dir}/checkForFormat.sh"),
            content=StaticFile("checkForFormat.sh"),
            mode=0755)
       Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {old_mark_dir} "
-        "{mark_dir} {dfs_name_dir}"),
+        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
               not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
       )
@@ -154,4 +156,5 @@ def decommission():
   ExecuteHadoop(nn_refresh_cmd,
                 user=hdfs_user,
                 conf_dir=conf_dir,
-                kinit_override=True)
+                kinit_override=True,
+                bin_dir=params.hadoop_bin_dir)

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/namenode.py

@@ -88,7 +88,7 @@ class NameNode(Script):
     
     
     def startRebalancingProcess(threshold):
-      rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      rebalanceCommand = format('export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
       return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
     
     command = startRebalancingProcess(threshold)
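
For reference, with the non-RPM defaults from params.py and a threshold of 10, the rebalance entry point now runs a command of this shape (values illustrative):

# Worked example of the command assembled by startRebalancingProcess; paths are the
# non-RPM defaults from params.py and the threshold is an example value.
hadoop_bin_dir = "/usr/bin"
hadoop_conf_dir = "/etc/hadoop/conf"
threshold = 10
rebalance_cmd = ("export PATH=$PATH:%s ; hadoop --config %s balancer -threshold %d"
                 % (hadoop_bin_dir, hadoop_conf_dir, threshold))
print(["su", "-", "hdfs", "-c", rebalance_cmd])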

+ 33 - 11
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py

@@ -24,6 +24,26 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
@@ -96,9 +116,7 @@ user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -106,8 +124,6 @@ hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'
 dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
 
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
 jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
 
 dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
@@ -124,6 +140,13 @@ namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
 fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -174,11 +197,10 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
 
-limits_conf_dir = "/etc/security/limits.d"
-
 io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
 if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
@@ -187,14 +209,15 @@ else:
 name_node_params = default("/commandParams/namenode", None)
 
 #hadoop params
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
 hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 java_home = config['hostLevelParams']['java_home']
+stack_version = str(config['hostLevelParams']['stack_version'])
+
+stack_is_champlain_or_further = not (stack_version.startswith('2.0') or stack_version.startswith('2.1'))
 
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
+if stack_version.startswith('2.0') and System.get_instance().os_family != "suse":
   # deprecated rhel jsvc_path
   jsvc_path = "/usr/libexec/bigtop-utils"
 else:
@@ -214,5 +237,4 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
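
Two of these additions carry the behaviour changes used elsewhere in the commit: execute_path appends the stack-specific Hadoop bin directory to the agent's PATH, and stack_is_champlain_or_further gates the non-root secure-datanode start on stacks newer than 2.0/2.1. A small runnable sketch of both, with example inputs:

# Illustrative reimplementation of two one-liners from params.py above; inputs are examples.
import os

def build_execute_path(hadoop_bin_dir):
    return os.environ.get("PATH", "") + os.pathsep + hadoop_bin_dir

def stack_is_champlain_or_further(stack_version):
    return not (stack_version.startswith("2.0") or stack_version.startswith("2.1"))

print(build_execute_path("/usr/bin"))
print(stack_is_champlain_or_further("2.0.6"))  # -> False
print(stack_is_champlain_or_further("2.2"))    # -> True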

+ 22 - 13
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py

@@ -31,28 +31,26 @@ class HdfsServiceCheck(Script):
 
     safemode_command = "dfsadmin -safemode get | grep OFF"
 
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
+    create_dir_cmd = format("fs -mkdir {dir}")
+    chmod_command = format("fs -chmod 777 {dir}")
+    test_dir_exists = format("su -s /bin/bash - {smoke_user} -c '{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}'")
     cleanup_cmd = format("fs -rm {tmp_file}")
    #cleanup put below to handle retries; if retrying there will be a stale file
     #that needs cleanup; exit code is fn of second command
     create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
     test_cmd = format("fs -test -e {tmp_file}")
-
-    log_dir = format("{hdfs_log_dir_prefix}/{smoke_user}")
-    Directory(log_dir, owner=params.smoke_user, recursive=True)
-
     if params.security_enabled:
       Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "su -s /bin/bash - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
         "{smoke_user}'"))
     ExecuteHadoop(safemode_command,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=20
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_dir_cmd,
                   user=params.smoke_user,
@@ -60,21 +58,32 @@ class HdfsServiceCheck(Script):
                   not_if=test_dir_exists,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(chmod_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_file_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(test_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     if params.has_journalnode_hosts:
       journalnode_port = params.journalnode_port
@@ -83,7 +92,7 @@ class HdfsServiceCheck(Script):
       checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
       comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
       checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "su -s /bin/bash - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
         "{comma_sep_jn_hosts} -p {journalnode_port}'")
       File(checkWebUIFilePath,
            content=StaticFile(checkWebUIFileName))
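
The mkdir/chmod split exists because ExecuteHadoop prefixes a single hadoop invocation (built from bin_dir and conf_dir) to the command it is given, so the old compound "fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}" string no longer composes cleanly once bin_dir is involved. A rough model of that expansion (an assumption about the wrapper's behaviour, not its real code):

# Rough model of ExecuteHadoop's command expansion; assumption only, not resource_management code.
def expand_hadoop_cmd(cmd, bin_dir="/usr/bin", conf_dir="/etc/hadoop/conf"):
    return "%s/hadoop --config %s %s" % (bin_dir, conf_dir, cmd)

print(expand_hadoop_cmd("fs -mkdir /tmp/hdfs-smoke"))
print(expand_hadoop_cmd("fs -chmod 777 /tmp/hdfs-smoke"))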

+ 88 - 5
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py

@@ -16,8 +16,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os
 
 from resource_management import *
+import re
 
 
 def service(action=None, name=None, user=None, create_pid_dir=False,
@@ -30,10 +32,6 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
   check_process = format(
     "ls {pid_file} >/dev/null 2>&1 &&"
     " ps -p `cat {pid_file}` >/dev/null 2>&1")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
   if create_pid_dir:
     Directory(pid_dir,
@@ -44,12 +42,76 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
               owner=user,
               recursive=True)
 
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+
   if params.security_enabled and name == "datanode":
+    dfs_dn_port = get_port(params.dfs_dn_addr)
+    dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
+
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if params.dfs_http_policy == "HTTPS_ONLY":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif params.dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+
+    # Calculate HADOOP_SECURE_DN_* env vars, but not append them yet
+    # These variables should not be set when starting secure datanode as a non-root
+    ## On secure datanodes, user to run the datanode as after dropping privileges
+    hadoop_secure_dn_user = params.hdfs_user
+    ## Where log files are stored in the secure data environment.
+    hadoop_secure_dn_log_dir = format("{hdfs_log_dir_prefix}/{hadoop_secure_dn_user}")
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hadoop_secure_dn_user}")
+    hadoop_secure_dn_exports = {
+      'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
+      'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
+      'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
+    }
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
+    if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+      if params.stack_is_champlain_or_further:
+        hadoop_env_exports.update(hadoop_secure_dn_exports)
+
+    if action == 'stop' and params.stack_is_champlain_or_further and \
+      os.path.isfile(hadoop_secure_dn_pid_file):
+        # We need special handling for this case to handle the situation
+        # when we configure non-root secure DN and then restart it
+        # to handle new configs. Otherwise we will not be able to stop
+        # a running instance
+        user = "root"
+        try:
+          with open(hadoop_secure_dn_pid_file, 'r') as f:
+            pid = f.read()
+          os.kill(int(pid), 0)
+          hadoop_env_exports.update(hadoop_secure_dn_exports)
+        except IOError:
+          pass  # Can not open pid file
+        except ValueError:
+          pass  # Pid file content is invalid
+        except OSError:
+          pass  # Process is not running
 
-  daemon_cmd = format("{ulimit_cmd} su - {user} -c '{cmd} {action} {name}'")
+
+  hadoop_env_exports_str = ''
+  for exp in hadoop_env_exports.items():
+    hadoop_env_exports_str += "export {0}={1} && ".format(exp[0], exp[1])
+
+  hadoop_daemon = format(
+    "{hadoop_env_exports_str}"
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
 
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
@@ -64,3 +126,24 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     File(pid_file,
          action="delete",
     )
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned at *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
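
A quick usage example for the two new helpers (illustrative addresses and ports):

# Usage example; assumes the get_port and is_secure_port definitions from utils.py above
# (plus "import re") are pasted alongside. Values are illustrative.
print(get_port("0.0.0.0:1019"))                  # -> 1019
print(get_port("https://dn.example.com:50475"))  # -> 50475
print(get_port(None))                            # -> None
print(is_secure_port(1019))                      # -> True, root-owned port range
print(is_secure_port(50010))                     # -> False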

+ 57 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hcat-env.sh file</description>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml

@@ -28,7 +28,7 @@
   </property>
   <property>
     <name>hive_database</name>
-    <value>New PosgreSQL Database</value>
+    <value>New PostgreSQL Database</value>
     <description>
       Property that determines whether the HIVE DB is managed by Ambari.
     </description>

+ 248 - 36
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml

@@ -45,7 +45,7 @@ limitations under the License.
   </property>
 <!-- End changes metastore database to postgres -->
 
-    <property>
+  <property>
     <name>javax.jdo.option.ConnectionUserName</name>
     <value>hive</value>
     <description>username to use against metastore database</description>
@@ -53,7 +53,7 @@ limitations under the License.
 
   <property require-input="true">
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
+    <value></value>
     <property-type>PASSWORD</property-type>
     <description>password to use against metastore database</description>
   </property>
@@ -71,20 +71,6 @@ limitations under the License.
      Clients must authenticate with Kerberos.</description>
   </property>
 
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/security/keytabs/hive.service.keytab</value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/_HOST@EXAMPLE.COM</value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
@@ -97,24 +83,6 @@ limitations under the License.
     <description>URI for client to contact metastore server</description>
   </property>
 
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
@@ -146,6 +114,24 @@ limitations under the License.
     <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
   </property>
 
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
   <property>
     <name>hive.security.authenticator.manager</name>
     <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
@@ -197,6 +183,12 @@ limitations under the License.
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
   </property>
 
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
+  </property>
+
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
@@ -267,7 +259,7 @@ limitations under the License.
 
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
+    <value>4</value>
     <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
@@ -295,7 +287,7 @@ limitations under the License.
 
   <property>
     <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
+    <value>true</value>
     <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
     </description>
   </property>
@@ -315,6 +307,191 @@ limitations under the License.
     </description>
   </property>
 
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>Whether to use MR or Tez</description>
+  </property>
+
+  <!-- 
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
+  </property>
+  -->
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrelevant of memory pressure condition.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value. Assigned value must match value specified for mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC</value>
+    <description>Java command line options for Tez. Must be assigned the same value as mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
+      For more advanced stats collection need to run analyze table queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on splits generated by orc will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>A comma-separated list of queues configured for the cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially want to run queries with Tez without a pool of sessions.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>Time in seconds between checks to see if any partitions need to be compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+  </property>
+
   <property>
     <name>hive.server2.thrift.port</name>
     <value>10000</value>
@@ -323,4 +500,39 @@ limitations under the License.
     </description>
   </property>
 
+  <property>
+      <name>hive.server2.authentication.spnego.principal</name>
+      <value>HTTP/_HOST@EXAMPLE.COM</value>
+      <description>
+          The SPNEGO service principal would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+      </description>
+  </property>
+
+  <property>
+      <name>hive.server2.authentication.spnego.keytab</name>
+      <value>/etc/security/keytabs/spnego.service.keytab</value>
+      <description>
+          This keytab would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+      </description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its
+      clients. To support this, each instance of HiveServer2 currently uses
+      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
+      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
+      connection string.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when
+      supporting dynamic service discovery.
+    </description>
+  </property>
+
 </configuration>
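
Since the dynamic service discovery properties tell JDBC/ODBC clients to connect through the ZooKeeper ensemble, here is a hedged example of what such a connection string looks like, built in Python with example hostnames and the hiveserver2 namespace defined above:

# Illustrative only: a HiveServer2 JDBC URL using ZooKeeper-based discovery. Hostnames are
# examples; the namespace matches the hive.server2.zookeeper.namespace default above.
zk_quorum = "zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181"
namespace = "hiveserver2"
jdbc_url = ("jdbc:hive2://%s/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=%s"
            % (zk_quorum, namespace))
print(jdbc_url)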

+ 2 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/configuration/webhcat-env.xml → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml

@@ -27,7 +27,7 @@
     <description>webhcat-env.sh content</description>
     <value>
 # The file containing the running pid
-PID_FILE={{pid_file}}
+PID_FILE={{webhcat_pid_file}}
 
 TEMPLETON_LOG_DIR={{templeton_log_dir}}/
 
@@ -47,7 +47,7 @@ CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
 #HCAT_PREFIX=hive_prefix
 
 # Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop
+export HADOOP_HOME={{hadoop_home}}
     </value>
   </property>
   

+ 0 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/configuration/webhcat-site.xml → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml


+ 79 - 74
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml

@@ -30,6 +30,7 @@
           <displayName>Hive Metastore</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <auto-deploy>
             <enabled>true</enabled>
             <co-locate>HIVE/HIVE_SERVER</co-locate>
@@ -46,6 +47,7 @@
           <displayName>HiveServer2</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <dependencies>
             <dependency>
               <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
@@ -75,7 +77,58 @@
             <scriptType>PYTHON</scriptType>
           </commandScript>
         </component>
-
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
         <component>
           <name>POSTGRESQL_SERVER</name>
           <displayName>PostgreSQL Server</displayName>
@@ -92,6 +145,7 @@
           <displayName>MySQL Server</displayName>
           <category>MASTER</category>
           <cardinality>0-1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <commandScript>
             <script>scripts/mysql_server.py</script>
             <scriptType>PYTHON</scriptType>
@@ -130,6 +184,22 @@
             </configFile>                         
           </configFiles>
         </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat Client</displayName>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>hcat-env.sh</fileName>
+              <dictionaryName>hcat-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
       </components>
 
       <osSpecifics>
@@ -139,6 +209,12 @@
             <package>
               <name>hive</name>
             </package>
+            <package>
+              <name>hive-hcatalog</name>
+            </package>
+            <package>
+              <name>hive-webhcat</name>
+            </package>
             <package>
               <name>postgresql-server</name>
             </package>
@@ -192,80 +268,9 @@
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
         <config-type>hive-env</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
       </configuration-dependencies>
     </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <displayName>HCatalog</displayName>
-      <comment>A table and storage management layer for Hadoop that enables users with different data processing tools
-        to more easily read and write data on the grid.
-      </comment>
-      <version>0.13.0.689</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <displayName>HCat</displayName>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive-hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HIVE</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-      </configuration-dependencies>
-      <excluded-config-types>
-        <config-type>hive-env</config-type>
-        <config-type>hive-site</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>hive-log4j</config-type>
-      </excluded-config-types>
-    </service>
-
   </services>
 </metainfo>

+ 5 - 5
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh

@@ -35,7 +35,7 @@ fi
 
 export no_proxy=$ttonhost
 cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 
 if [[ "$httpExitCode" -ne "200" ]] ; then
@@ -49,7 +49,7 @@ exit 0
 #try hcat ddl command
 echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
 cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 
 if [[ "$httpExitCode" -ne "200" ]] ; then
@@ -75,17 +75,17 @@ echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
 echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
 
 #copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
 
 #copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
 
 #create, copy post args file
 echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
 
 #submit pig query
 cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
 httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
 if [[ "$httpExitCode" -ne "200" ]] ; then
   echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"

+ 12 - 9
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py

@@ -25,7 +25,15 @@ import sys
 def hcat():
   import params
 
+  Directory(params.hive_conf_dir,
+            recursive=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
   Directory(params.hcat_conf_dir,
+            recursive=True,
             owner=params.hcat_user,
             group=params.user_group,
   )
@@ -43,13 +51,8 @@ def hcat():
             group=params.user_group,
             mode=0644)
 
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.hcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
   )

+ 6 - 4
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py

@@ -44,7 +44,7 @@ def hcat_service_check():
             tries=3,
             user=params.smokeuser,
             try_sleep=5,
-            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
+            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin', params.execute_path],
             logoutput=True)
 
     if params.security_enabled:
@@ -55,7 +55,8 @@ def hcat_service_check():
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
                     keytab=params.hdfs_user_keytab,
-                    principal=params.hdfs_principal_name
+                    principal=params.hdfs_principal_name,
+                    bin_dir=params.execute_path
       )
     else:
       ExecuteHadoop(test_cmd,
@@ -64,7 +65,8 @@ def hcat_service_check():
                     conf_dir=params.hadoop_conf_dir,
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab
+                    keytab=params.hdfs_user_keytab,
+                    bin_dir=params.execute_path
       )
 
     cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
@@ -73,6 +75,6 @@ def hcat_service_check():
             tries=3,
             user=params.smokeuser,
             try_sleep=5,
-            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
+            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin', params.execute_path],
             logoutput=True
     )

+ 19 - 16
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py

@@ -44,7 +44,21 @@ def hive(name=None):
   # The reason is that stale-configs are service-level, not component.
   for conf_dir in params.hive_conf_dirs_list:
     fill_conf_dir(conf_dir)
-    
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
   if name == 'metastore' or name == 'hiveserver2':
     jdbc_connector()
     
@@ -92,7 +106,7 @@ def hive(name=None):
     crt_directory(params.hive_pid_dir)
     crt_directory(params.hive_log_dir)
     crt_directory(params.hive_var_lib)
-    
+
 def fill_conf_dir(component_conf_dir):
   import params
   
@@ -110,20 +124,7 @@ def fill_conf_dir(component_conf_dir):
             group=params.user_group,
             mode=0644)
 
-  XmlConfig("hive-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-  
-  File(format("{component_conf_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-  )
-  
+
   crt_file(format("{component_conf_dir}/hive-default.xml.template"))
   crt_file(format("{component_conf_dir}/hive-env.sh.template"))
 
@@ -188,6 +189,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "/usr/bin/"])
   elif params.hive_jdbc_driver == "org.postgresql.Driver":
     cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
@@ -195,6 +197,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "usr/bin/"])
 
   elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":

+ 15 - 19
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py

@@ -19,7 +19,6 @@ limitations under the License.
 """
 
 from resource_management import *
-import socket
 import sys
 import time
 from resource_management.core.shell import call
@@ -49,6 +48,8 @@ def hive_service(
     
     Execute(demon_cmd,
             user=params.hive_user,
+            environment={'HADOOP_HOME': params.hadoop_home},
+            path=params.execute_path,
             not_if=process_id_exists
     )
 
@@ -70,23 +71,16 @@ def hive_service(
       
       start_time = time.time()
       end_time = start_time + SOCKET_WAIT_SECONDS
-      
-      s = socket.socket()
-      s.settimeout(5)
-            
+
       is_service_socket_valid = False
       print "Waiting for the Hive server to start..."
-      try:
-        while time.time() < end_time:
-          try:
-            s.connect((address, port))
-            is_service_socket_valid = True
-            break
-          except socket.error, e:          
-            time.sleep(5)
-      finally:
-        s.close()
-      
+      while time.time() < end_time:
+        if check_thrift_port_sasl(address, port, 2, security_enabled=params.security_enabled):
+          is_service_socket_valid = True
+          break
+        else:
+          time.sleep(2)
+
       elapsed_time = time.time() - start_time    
       
       if is_service_socket_valid == False: 
@@ -103,8 +97,10 @@ def hive_service(
 def check_fs_root():
   import params  
   fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
-  cmd = "/usr/lib/hive/bin/metatool -listFSRoot 2>/dev/null | grep hdfs://"
+  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
   code, out = call(cmd, user=params.hive_user)
   if code == 0 and fs_root_url.strip() != out.strip():
-    cmd = format("/usr/lib/hive/bin/metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
-    Execute(cmd, user=params.hive_user)
+    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
+    Execute(cmd,
+            environment= {'PATH' : params.execute_path },
+            user=params.hive_user)
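
check_fs_root uses call() rather than Execute() because it needs the metatool output back: it compares the recorded FS root against the expected one and only then issues the update. A standalone sketch of that compare-then-update flow with stubbed commands and example URLs:

# Standalone sketch of the compare-then-update flow in check_fs_root; the runner below is a
# stub standing in for resource_management's call()/Execute(), and the URLs are examples.
def check_fs_root(expected_root, run):
    code, out = run("metatool -listFSRoot 2>/dev/null | grep hdfs://")
    if code == 0 and expected_root.strip() != out.strip():
        run("metatool -updateLocation %s %s" % (expected_root, out.strip()))

def fake_run(cmd):
    print("would run: " + cmd)
    return 0, "hdfs://old-nn.example.com:8020/apps/hive/warehouse"

check_fs_root("hdfs://nn.example.com:8020/apps/hive/warehouse", fake_run)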

+ 23 - 14
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py

@@ -53,7 +53,7 @@ def install_tez_jars():
     app_dir_path = None
     lib_dir_path = None
 
-    if len(destination_hdfs_dirs) > 1:
+    if len(destination_hdfs_dirs) > 0:
       for path in destination_hdfs_dirs:
         if 'lib' in path:
           lib_dir_path = path
@@ -64,14 +64,17 @@ def install_tez_jars():
     pass
 
     if app_dir_path:
-      CopyFromLocal(params.tez_local_api_jars,
-                    mode=0755,
-                    owner=params.tez_user,
-                    dest_dir=app_dir_path,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
-      )
-    pass
+      for scr_file, dest_file in params.app_dir_files.iteritems():
+        CopyFromLocal(scr_file,
+                      mode=0755,
+                      owner=params.tez_user,
+                      dest_dir=app_dir_path,
+                      dest_file=dest_file,
+                      kinnit_if_needed=kinit_if_needed,
+                      hdfs_user=params.hdfs_user,
+                      hadoop_bin_dir=params.hadoop_bin_dir,
+                      hadoop_conf_dir=params.hadoop_conf_dir
+        )
 
     if lib_dir_path:
       CopyFromLocal(params.tez_local_lib_jars,
@@ -79,7 +82,9 @@ def install_tez_jars():
                     owner=params.tez_user,
                     dest_dir=lib_dir_path,
                     kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
+                    hdfs_user=params.hdfs_user,
+                    hadoop_bin_dir=params.hadoop_bin_dir,
+                    hadoop_conf_dir=params.hadoop_conf_dir
       )
     pass
 
@@ -90,10 +95,14 @@ def get_tez_hdfs_dir_paths(tez_lib_uris = None):
   if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
     dir_paths = tez_lib_uris.split(',')
     for path in dir_paths:
-      lib_dir_path = path.replace(hdfs_path_prefix, '')
-      lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
-      lib_dir_paths.append(lib_dir_path)
+      if not "tez.tar.gz" in path:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
+        lib_dir_paths.append(lib_dir_path)
+      else:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_paths.append(os.path.dirname(lib_dir_path))
     pass
   pass
 
-  return lib_dir_paths
+  return lib_dir_paths
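The reworked get_tez_hdfs_dir_paths keeps the parent directory for a tez.tar.gz entry instead of treating the archive itself as a lib directory. A standalone sketch of that parsing logic, assuming an hdfs:/// prefix (parse_tez_dirs and the default prefix value are illustrative, not the code above):

import posixpath

def parse_tez_dirs(tez_lib_uris, hdfs_path_prefix="hdfs:///"):
    # Split the comma-separated tez.lib.uris value into HDFS directory paths.
    # An entry pointing at tez.tar.gz contributes its parent directory instead.
    dir_paths = []
    if not tez_lib_uris or hdfs_path_prefix not in tez_lib_uris:
        return dir_paths
    for path in tez_lib_uris.split(','):
        path = path.strip().replace(hdfs_path_prefix, '')
        if "tez.tar.gz" in path:
            dir_paths.append(posixpath.dirname(path))
        else:
            dir_paths.append(path if path.endswith('/') else path + '/')
    return dir_paths

# parse_tez_dirs("hdfs:///apps/tez/,hdfs:///apps/tez/lib/tez.tar.gz")
# -> ['apps/tez/', 'apps/tez/lib']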

+ 88 - 20
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py

@@ -26,6 +26,59 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+hdp_stack_version = config['hostLevelParams']['stack_version']
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+  hadoop_home = '/usr/bigtop/current/hadoop-client'
+  hadoop_streeming_jars = "/usr/bigtop/current/hadoop-mapreduce-client/hadoop-streaming-*.jar"
+  hive_bin = '/usr/bigtop/current/hive-client/bin'
+  hive_lib = '/usr/bigtop/current/hive-client/lib'
+  tez_local_api_jars = '/usr/bigtop/current/tez-client/tez*.jar'
+  tez_local_lib_jars = '/usr/bigtop/current/tez-client/lib/*.jar'
+  tez_tar_file = "/usr/bigtop/current/tez-client/lib/tez*.tar.gz"
+  pig_tar_file = '/usr/bigtop/current/pig-client/pig.tar.gz'
+  hive_tar_file = '/usr/bigtop/current/hive-client/hive.tar.gz'
+  sqoop_tar_file = '/usr/bigtop/current/sqoop-client/sqoop*.tar.gz'
+
+  hcat_lib = '/usr/bigtop/current/hive/hive-hcatalog/share/hcatalog'
+  webhcat_bin_dir = '/usr/bigtop/current/hive-hcatalog/sbin'
+
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+  hive_bin = '/usr/lib/hive/bin'
+  hive_lib = '/usr/lib/hive/lib/'
+  tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+  tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+  tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
+  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
+  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
+  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
+
+  if str(hdp_stack_version).startswith('2.0'):
+    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+  # for newer versions
+  else:
+    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hive_conf_dir = "/etc/hive/conf"
+hive_client_conf_dir = "/etc/hive/conf"
+hive_server_conf_dir = '/etc/hive/conf.server'
+
+# for newer versions
+hcat_conf_dir = '/etc/hive-hcatalog/conf'
+config_dir = '/etc/hive-webhcat/conf'
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
 hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
 hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
@@ -34,7 +87,6 @@ hive_metastore_db_type = config['configurations']['hive-env']['hive_database_typ
 
 #users
 hive_user = config['configurations']['hive-env']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
 if hive_jdbc_driver == "com.mysql.jdbc.Driver":
@@ -51,11 +103,9 @@ check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
 
 #common
-hdp_stack_version = config['hostLevelParams']['stack_version']
 hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
 hive_var_lib = '/var/lib/hive'
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_bin = '/usr/lib/hive/bin'
 hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
@@ -77,8 +127,6 @@ hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 #Default conf dir for client
-hive_client_conf_dir = "/etc/hive/conf"
-hive_server_conf_dir = "/etc/hive/conf"
 hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
@@ -92,8 +140,6 @@ hive_database_name = config['configurations']['hive-env']['hive_database_name']
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh.j2'
 
-hadoop_home = '/usr/lib/hadoop'
-
 ##Starting metastore
 start_metastore_script = 'startMetastore.sh'
 hive_metastore_pid = status_params.hive_metastore_pid
@@ -137,8 +183,6 @@ postgresql_daemon_name = status_params.postgresql_daemon_name
 init_metastore_schema = True
 
 ########## HCAT
-hcat_conf_dir = '/etc/hive-hcatalog/conf'
-hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
 
 hcat_dbroot = hcat_lib
 
@@ -147,8 +191,7 @@ webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
 hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-
-hadoop_conf_dir = '/etc/hadoop/conf'
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
 
 #hive-log4j.properties.template
 if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
@@ -170,23 +213,17 @@ hive_hdfs_user_mode = 0700
 hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
 tez_user = config['configurations']['tez-env']['tez_user']
 
 if System.get_instance().os_family == "ubuntu":
   mysql_configname = '/etc/mysql/my.cnf'
 else:
   mysql_configname = '/etc/my.cnf'
-  
 
 # Hive security
 hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
@@ -200,16 +237,47 @@ if os.path.exists(mysql_jdbc_driver_jar):
 else:  
   hive_exclude_packages = []
 
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+if str(hdp_stack_version).startswith('2.0') or str(hdp_stack_version).startswith('2.1'):
+  app_dir_files = {tez_local_api_jars:None}
+else:
+  app_dir_files = {
+              tez_local_api_jars:None,
+              tez_tar_file:"tez.tar.gz"
+  }
+
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
 HdfsDirectory = functools.partial(
   HdfsDirectory,
   conf_dir=hadoop_conf_dir,
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
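The HdfsDirectory partial above now also pre-binds bin_dir. The same functools.partial pattern in plain Python, for readers unfamiliar with it (make_dir and its keyword names are a stand-in, not the Ambari resource API):

import functools

def make_dir(path, conf_dir=None, hdfs_user=None, bin_dir=None, mode=0o755):
    # Stand-in for a resource that creates an HDFS directory with shared settings.
    print("mkdir %s (conf=%s, user=%s, bin=%s, mode=%o)" % (path, conf_dir, hdfs_user, bin_dir, mode))

# Pre-bind the arguments that are identical for every call site.
MakeDir = functools.partial(make_dir,
                            conf_dir="/etc/hadoop/conf",
                            hdfs_user="hdfs",
                            bin_dir="/usr/bin")

MakeDir("/apps/hive/warehouse", mode=0o700)
MakeDir("/user/hive")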

+ 6 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py

@@ -41,8 +41,12 @@ class PostgreSQLServer(Script):
     self.update_pghda_conf(env)
     self.update_postgresql_conf(env)
 
-    # restart the postgresql server for the changes to take effect
-    self.stop(env)
+    # Reload the settings and start the postgresql server for the changes to take effect
+    # Note: don't restart the postgresql server, because when the Ambari server and the Hive metastore are on the same machine
+    # they share the same postgresql instance. Restarting it may cause the Ambari server to lose its database connection.
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'reload')
+
+    # Ensure the postgresql server is started, because creating the hive metastore user requires a running server.
     self.start(env)
 
     # create the database and hive_metastore_user
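Reloading instead of restarting keeps the shared PostgreSQL instance, and the Ambari server's connection to it, alive; the explicit start afterwards covers the case where the service was not running yet. A hedged sketch of that sequence using plain subprocess calls (the daemon name and helper are illustrative, not the postgresql_service resource):

import subprocess

def reload_then_start(daemon_name="postgresql"):
    # Pick up config changes without dropping existing client connections,
    # then make sure the server is running before creating the metastore user.
    subprocess.call(["service", daemon_name, "reload"])
    subprocess.call(["service", daemon_name, "start"])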

+ 2 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py

@@ -37,3 +37,5 @@ def postgresql_service(postgresql_daemon_name=None, action='start'):
       logoutput = True,
       not_if = status_cmd
     )
+  else:
+    Execute(cmd, logoutput = True)

+ 6 - 7
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py

@@ -23,6 +23,7 @@ import socket
 import sys
 
 from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
 
 class HiveServiceCheck(Script):
   def service_check(self, env):
@@ -31,17 +32,15 @@ class HiveServiceCheck(Script):
 
     address=format("{hive_server_host}")
     port=int(format("{hive_server_port}"))
-    s = socket.socket()
     print "Test connectivity to hive server"
-    try:
-      s.connect((address, port))
+    if check_thrift_port_sasl(address, port, security_enabled=params.security_enabled):
       print "Successfully connected to %s on port %s" % (address, port)
-      s.close()
-    except socket.error, e:
-      print "Connection to %s on port %s failed: %s" % (address, port, e)
-      sys.exit(1)
+    else:
+      print "Connection to %s on port %s failed" % (address, port)
+      exit(1)
 
     hcat_service_check()
+    webhcat_service_check()
 
 if __name__ == "__main__":
   HiveServiceCheck().execute()

+ 1 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py

@@ -28,6 +28,7 @@ hive_pid = 'hive-server.pid'
 hive_metastore_pid = 'hive.pid'
 
 hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
+webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
 
 if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
   daemon_name = 'mysql'

+ 40 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat.py → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py

@@ -20,6 +20,8 @@ Ambari Agent
 """
 from resource_management import *
 import sys
+import os.path
+import glob
 
 
 def webhcat():
@@ -56,6 +58,7 @@ def webhcat():
             recursive=True)
 
   Directory(params.config_dir,
+            recursive=True,
             owner=params.webhcat_user,
             group=params.user_group)
 
@@ -84,10 +87,45 @@ def webhcat():
             path='/bin'
     )
 
-  CopyFromLocal('/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar',
+  CopyFromLocal(params.hadoop_streeming_jars,
                 owner=params.webhcat_user,
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 kinnit_if_needed=kinit_if_needed,
-                hdfs_user=params.hdfs_user
+                hdfs_user=params.hdfs_user,
+                hadoop_bin_dir=params.hadoop_bin_dir,
+                hadoop_conf_dir=params.hadoop_conf_dir
   )
+
+  if (os.path.isfile(params.pig_tar_file)):
+    CopyFromLocal(params.pig_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+  if (os.path.isfile(params.hive_tar_file)):
+    CopyFromLocal(params.hive_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+  if (len(glob.glob(params.sqoop_tar_file)) > 0):
+    CopyFromLocal(params.sqoop_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
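pig_tar_file and hive_tar_file are fixed paths, so os.path.isfile is enough, while sqoop_tar_file contains a wildcard and needs glob.glob. A small illustration of why one glob call can cover both cases (existing_sources is a hypothetical helper):

import glob

def existing_sources(*patterns):
    # glob.glob returns [path] for a plain file that exists, the matching files
    # for a wildcard pattern, and [] when nothing is there, so one call covers both.
    found = []
    for pattern in patterns:
        found.extend(glob.glob(pattern))
    return found

# existing_sources('/usr/share/HDP-webhcat/pig.tar.gz', '/usr/share/HDP-webhcat/sqoop*.tar.gz')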

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_server.py → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py

@@ -47,7 +47,7 @@ class WebHCatServer(Script):
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    check_process_status(status_params.pid_file)
+    check_process_status(status_params.webhcat_pid_file)
 
 if __name__ == "__main__":
   WebHCatServer().execute()

+ 2 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_service.py → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py

@@ -27,7 +27,7 @@ def webhcat_service(action='start'):
 
   if action == 'start':
     demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1')
+    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1')
     Execute(demon_cmd,
             user=params.webhcat_user,
             not_if=no_op_test
@@ -37,4 +37,4 @@ def webhcat_service(action='start'):
     Execute(demon_cmd,
             user=params.webhcat_user
     )
-    Execute(format('rm -f {pid_file}'))
+    Execute(format('rm -f {webhcat_pid_file}'))

+ 14 - 14
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/sqoop_client.py → ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py

@@ -18,24 +18,24 @@ limitations under the License.
 
 """
 
-import sys
 from resource_management import *
 
-from sqoop import sqoop
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
 
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local}",
+               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
 
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)
 
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
 
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
 
-if __name__ == "__main__":
-  SqoopClient().execute()

+ 0 - 43
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2

@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{hive_metastore_port}}

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/startHiveserver2.sh.j2

@@ -25,5 +25,5 @@ HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.d
 HIVE_SERVER2_OPTS="${HIVE_SERVER2_OPTS} -hiveconf hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator -hiveconf hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory "
 {% endif %}
 
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
 echo $!|cat>$3

+ 0 - 66
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml

@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>MAHOUT</name>
-      <displayName>Mahout</displayName>
-      <comment>The Apache Mahout project's goal is to build a scalable machine learning library</comment>
-      <version>0.9.666</version>
-      <components>
-        <component>
-          <name>MAHOUT</name>
-          <displayName>Mahout Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/mahout_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>mahout</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>600</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

+ 0 - 66
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py

@@ -1,66 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-def mahout():
-  import params
-
-  Directory( params.mahout_conf_dir,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  mahout_TemplateConfig( ['mahout-env.sh'])
-
-  # mahout_properties is always set to a default even if it's not in the payload
-  File(format("{mahout_conf_dir}/mahout.properties"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.hdfs_user,
-              content=params.mahout_properties
-  )
-
-  if params.log4j_props:
-    File(format("{mahout_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{mahout_conf_dir}/log4j.properties"))):
-    File(format("{mahout_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user
-    )
-
-def mahout_TemplateConfig(name):
-  import params
-
-  if not isinstance(name, list):
-    name = [name]
-
-  for x in name:
-    TemplateConfig( format("{mahout_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )

+ 0 - 36
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout_client.py

@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-Ambari Agent
-"""
-import sys
-from resource_management import *
-from mahout import mahout
-
-class MahoutClient(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mahout()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  MahoutClient().execute()

+ 0 - 55
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/params.py

@@ -1,55 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-
-hadoop_log_dir = "/var/log/hadoop"
-mahout_conf_dir = "/etc/mahout/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr/lib/hadoop/"
-
-# mahout.properties - if not in the JSON command, then we need to esnure some 
-# basic properties are set; this is a safety mechanism
-if (('mahout-properties' in config['configurations']) and ('mahout-content' in config['configurations']['mahout-properties'])):
-  mahout_properties = config['configurations']['mahout-properties']['mahout-content']
-else:
-  mahout_properties = "mahout.location.check.strict=false"
-
-# log4j.properties
-if (('mahout-log4j' in config['configurations']) and ('content' in config['configurations']['mahout-log4j'])):
-  log4j_props = config['configurations']['mahout-log4j']['content']
-else:
-  log4j_props = None

+ 0 - 92
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/service_check.py

@@ -1,92 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-from resource_management import *
-
-class MahoutServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    # prepare the input file content
-    input_file = os.path.join("/tmp", "mahout_input.csv")
-    input_file_content = """\
-1,101,5.0
-1,102,5.0
-1,103,2.5
-2,101,2.0
-2,102,2.5
-2,103,5.0
-2,104,2.0
-3,101,2.5
-3,104,4.0
-3,105,4.5
-3,107,5.0"""
-
-    File(input_file, content=input_file_content, mode=644)
-
-    # create the log dir for the smoke user
-    Directory(os.path.join(params.hadoop_log_dir, params.smokeuser),
-	            owner = params.smokeuser,
-              group = params.user_group,
-              mode = 755)
-
-    # transfer the input file to hdfs
-    recommenderdata_dir = "recommenderdata"
-    recommenderoutput_dir = "recommenderoutput"
-    cleanup_cmd = format("fs -rm -r {recommenderdata_dir} {recommenderoutput_dir} temp")
-    #cleanup put below to handle retries; if retrying there wil be a stale file that needs cleanup; exit code is fn of second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop fs -put {input_file} {recommenderdata_dir}") #TODO: inconsistent that second command needs hadoop
-
-    test_cmd_cat = "mahout cat /etc/passwd"
-    test_cmd_recommendation = format("mahout recommenditembased --input {recommenderdata_dir} --output {recommenderoutput_dir} -s SIMILARITY_COOCCURRENCE")
-
-    ExecuteHadoop(create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir,
-      # for kinit run
-      keytab = params.smoke_user_keytab,
-      security_enabled = params.security_enabled,
-      kinit_path_local = params.kinit_path_local,
-      logoutput = True
-    )
-
-    Execute(test_cmd_cat,
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True
-    )
-
-    Execute(test_cmd_recommendation,
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True,
-    )
-
-if __name__ == "__main__":
-  MahoutServiceCheck().execute()

+ 0 - 34
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/templates/mahout-env.sh.j2

@@ -1,34 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

+ 2 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/configuration/oozie-env.xml

@@ -67,7 +67,7 @@
 
 if [ -d "/usr/lib/bigtop-tomcat" ]; then
   export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
-  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}
+  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
   export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
   export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
 fi
@@ -122,7 +122,7 @@ export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
 # The base URL for callback URLs to Oozie
 #
 # export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
     </value>
   </property>
 

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/configuration/oozie-log4j.xml

@@ -52,7 +52,7 @@ log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
 log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
 log4j.appender.oozie.Append=true
 log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
 
 log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd

+ 1 - 13
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/metainfo.xml

@@ -23,7 +23,7 @@
       <displayName>Oozie</displayName>
       <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
       </comment>
-      <version>4.0.0.2.0</version>
+      <version>4.0.1.691</version>
       <components>
         <component>
           <name>OOZIE_SERVER</name>
@@ -121,21 +121,9 @@
           </packages>
         </osSpecific>
         
-       <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>extjs-2.2-1</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
         <osSpecific>
           <osFamily>ubuntu12</osFamily>
           <packages>
-            <package>
-              <name>extjs</name>
-            </package>
             <package>
               <name>libxml2-utils</name>
             </package>

+ 19 - 14
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/files/oozieSmoke2.sh

@@ -35,10 +35,10 @@ function checkOozieJobStatus {
   num_of_tries=${num_of_tries:-10}
   local i=0
   local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su -s /bin/bash - ${smoke_test_user} -c "$cmd"
   while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    cmd_output=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
     (IFS='';echo $cmd_output)
     act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
     echo "workflow_status=$act_status"
@@ -58,11 +58,13 @@ function checkOozieJobStatus {
 }
 
 export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
+export oozie_bin_dir=$2
+export hadoop_conf_dir=$3
+export hadoop_bin_dir=$4
+export smoke_test_user=$5
+export security_enabled=$6
+export smoke_user_keytab=$7
+export kinit_path_local=$8
 
 export OOZIE_EXIT_CODE=0
 export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
@@ -77,6 +79,9 @@ fi
   
 
 export OOZIE_EXAMPLES_DIR=`$LIST_PACKAGE_FILES_CMD oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+if [[ -z "$OOZIE_EXAMPLES_DIR" ]] ; then
+  export OOZIE_EXAMPLES_DIR='/usr/bigtop/current/oozie-client/doc/'
+fi
 cd $OOZIE_EXAMPLES_DIR
 
 tar -zxf oozie-examples.tar.gz
@@ -93,14 +98,14 @@ else
   kinitcmd=""
 fi
 
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
 
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
 echo $cmd
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_info=`su -s /bin/bash - ${smoke_test_user} -c "$cmd" | grep "job:"`
 job_id="`echo $job_info | cut -d':' -f2`"
 checkOozieJobStatus "$job_id" 15
 OOZIE_EXIT_CODE="$?"

+ 52 - 22
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie.py

@@ -31,6 +31,11 @@ def oozie(is_server=False # TODO: see if see can remove this
                          owner=params.oozie_user,
                          mode=params.oozie_hdfs_user_mode
     )
+  Directory( params.conf_dir,
+             recursive = True,
+             owner = params.oozie_user,
+             group = params.user_group
+  )
   XmlConfig( "oozie-site.xml",
     conf_dir = params.conf_dir,
     configurations = params.config['configurations']['oozie-site'],
@@ -39,16 +44,23 @@ def oozie(is_server=False # TODO: see if see can remove this
     group = params.user_group,
     mode = 0664
   )
-  Directory( params.conf_dir,
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
   File(format("{conf_dir}/oozie-env.sh"),
     owner=params.oozie_user,
     content=InlineTemplate(params.oozie_env_sh_template)
   )
 
+  if params.security_enabled:
+    tomcat_conf_dir = format("{tomcat_conf_secure}")
+  else:
+    tomcat_conf_dir = format("{tomcat_conf}")
+
+  File(format("{tomcat_conf_dir}/catalina.properties"),
+    content = Template("catalina.properties.j2"),
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0755
+  )
+
   if (params.log4j_props != None):
     File(format("{params.conf_dir}/oozie-log4j.properties"),
       mode=0644,
@@ -121,32 +133,50 @@ def oozie_server_specific(
     not_if="ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)"
   )
   
-  oozie_server_directorties = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]            
+  oozie_server_directorties = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
   Directory( oozie_server_directorties,
     owner = params.oozie_user,
     mode = 0755,
     recursive = True
   )
 
-  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
-  
-  # this is different for HDP1
-  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir} && mkdir -p {oozie_libext_dir} && cp {ext_js_path} {oozie_libext_dir}")
+  cmd1 = "sh"
+
   if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd3 += format(" && cp {jdbc_driver_jar} {oozie_libext_dir}")
-  #falcon el extension
-  if params.has_falcon_host:
-    cmd3 += format(' && cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}')
-  # this is different for HDP1
-  cmd4 = format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war")
+    cmd1 += format(" && cp {jdbc_driver_jar} {oozie_lib_dir}")
 
   no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1, cmd2, cmd3],
+  Execute( [cmd1],
     not_if  = no_op_test
   )
-  Execute( cmd4,
-    user = params.oozie_user,
-    not_if  = no_op_test
+
+# The hadoop-auth jars shipped with the BigTop 0.8 Oozie package are the wrong version
+def correct_hadoop_auth_jar_files():
+
+  hadoop_auth_jar_file = "/usr/lib/hadoop/hadoop-auth-2.4.1.jar"
+
+  if not os.path.exists(hadoop_auth_jar_file):
+    raise Fail("Could not find %s" % (hadoop_auth_jar_file))
+
+  commands = ' '.join(
+    (
+      "if [ -f /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar ];",
+      "then",
+      "rm -rf /usr/lib/oozie/lib/hadoop-auth-2.0.2-alpha.jar;",
+      "cp " + hadoop_auth_jar_file + " /usr/lib/oozie/lib;",
+      "fi"
+    )
   )
-  
+  Execute(commands)
+
+  commands = ' '.join(
+    (
+      "if [ -f /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar ];",
+      "then",
+      "rm -rf /usr/lib/oozie/libtools/hadoop-auth-2.0.2-alpha.jar;",
+      "cp " + hadoop_auth_jar_file + " /usr/lib/oozie/libtools;",
+      "fi"
+    )
+  )
+  Execute(commands)
+

+ 2 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_client.py

@@ -22,12 +22,14 @@ import sys
 from resource_management import *
 
 from oozie import oozie
+from oozie import correct_hadoop_auth_jar_files
 from oozie_service import oozie_service
 
          
 class OozieClient(Script):
   def install(self, env):
     self.install_packages(env)
+    correct_hadoop_auth_jar_files()
     self.configure(env)
     
   def configure(self, env):

+ 3 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_server.py

@@ -22,13 +22,15 @@ import sys
 from resource_management import *
 
 from oozie import oozie
+from oozie import correct_hadoop_auth_jar_files
 from oozie_service import oozie_service
 
          
 class OozieServer(Script):
   def install(self, env):
     self.install_packages(env)
-    
+    correct_hadoop_auth_jar_files()
+
   def configure(self, env):
     import params
     env.set_params(params)

+ 11 - 12
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_service.py

@@ -27,7 +27,7 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
   no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
   
   if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
+    start_cmd = "service oozie start"
     
     if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
        params.jdbc_driver_name == "org.postgresql.Driver" or \
@@ -36,37 +36,36 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
     else:
       db_connection_check_command = None
       
-    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+    cmd1 =  "service oozie init"
+    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
 
     if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
-      print "ERROR: jdbc file " + params.jdbc_driver_jar + " is unavailable. Please, follow next steps:\n" \
-        "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p /usr/lib/oozie/libserver/\n" \
+      print format("ERROR: jdbc file {jdbc_driver_jar} is unavailable. Please follow these steps:\n" \
+        "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
         "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
-        "/usr/lib/oozie/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
-        "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar /usr/lib/oozie/libext/\n"
+        "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+        "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
       exit(1)
 
     if db_connection_check_command:
       Execute( db_connection_check_command, tries=5, try_sleep=10)
                   
     Execute( cmd1,
-      user = params.oozie_user,
       not_if  = no_op_test,
       ignore_failures = True
     ) 
     
     Execute( cmd2,
-      user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+      user = params.oozie_user,
+      not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+      path = params.execute_path
     )
     
     Execute( start_cmd,
-      user = params.oozie_user,
       not_if  = no_op_test,
     )
   elif action == 'stop':
-    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
+    stop_cmd  = "service oozie stop"
     Execute( stop_cmd,
       only_if  = no_op_test
     )

+ 67 - 21
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/params.py

@@ -20,31 +20,87 @@ limitations under the License.
 
 from resource_management import *
 import status_params
+import os
+import fnmatch
 
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+  hadoop_lib_home = "/usr/bigtop/current/hadoop-client/lib"
+  hive_lib_dir = "/usr/bigtop/current/hive-client/lib"
+  oozie_lib_dir = "/usr/bigtop/current/oozie-client/"
+  oozie_setup_sh = "/usr/bigtop/current/oozie-client/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/usr/bigtop/current/oozie-client/tomcat-deployment/webapps"
+  oozie_webapps_conf_dir = "/usr/bigtop/current/oozie-client/tomcat-deployment/conf"
+  oozie_libext_dir = "/usr/bigtop/current/oozie-client/libext"
+  oozie_server_dir = "/usr/bigtop/current/oozie-client/tomcat-deployment"
+  oozie_shared_lib = "/usr/bigtop/current/oozie-client/oozie-sharelib.tar.gz"
+  oozie_home = "/usr/bigtop/current/oozie-client"
+  oozie_bin_dir = "/usr/bigtop/current/oozie-client/bin"
+  falcon_home = '/usr/bigtop/current/falcon-client'
+  tomcat_conf = "/etc/oozie/tomcat-conf.http/conf"
+  tomcat_conf_secure = "/etc/oozie/tomcat-conf.https/conf"
+
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  hive_lib_dir = "/usr/lib/hive/lib"
+  oozie_lib_dir = "/var/lib/oozie/"
+  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/var/lib/oozie/tomcat-deployment/webapps/"
+  oozie_webapps_conf_dir = "/var/lib/oozie/tomcat-deployment/conf"
+  oozie_libext_dir = "/usr/lib/oozie/libext"
+  oozie_server_dir = "/var/lib/oozie/tomcat-deployment"
+  oozie_shared_lib = "/usr/lib/oozie/oozie-sharelib.tar.gz"
+  oozie_home = "/usr/lib/oozie"
+  oozie_bin_dir = "/usr/bin"
+  falcon_home = '/usr/lib/falcon'
+  tomcat_conf = "/etc/oozie/tomcat-conf.http/conf"
+  tomcat_conf_secure = "/etc/oozie/tomcat-conf.https/conf"
+
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+conf_dir = "/etc/oozie/conf"
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
-conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
 user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
 oozie_tmp_dir = "/var/tmp/oozie"
 oozie_hdfs_user_dir = format("/user/{oozie_user}")
 oozie_pid_dir = status_params.oozie_pid_dir
 pid_file = status_params.pid_file
 hadoop_jar_location = "/usr/lib/hadoop/"
-hdp_stack_version = config['hostLevelParams']['stack_version']
-# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
-ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
-oozie_libext_dir = "/usr/lib/oozie/libext"
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
+hive_jar_files = ""
+
+if not os.path.exists(hive_lib_dir):
+    raise Fail("Could not find Hive library directory: %s" % (hive_lib_dir))
+
+for entry in os.listdir(hive_lib_dir):
+    absolute_path = os.path.join(hive_lib_dir, entry)
+    if os.path.isfile(absolute_path) and not os.path.islink(absolute_path):
+        if fnmatch.fnmatchcase(entry, "hive-*.jar"):
+            if (len(hive_jar_files) == 0):
+                hive_jar_files = absolute_path
+            else:
+                hive_jar_files = hive_jar_files + "," + absolute_path
+
+catalina_properties_common_loader = "/usr/lib/hive-hcatalog/share/hcatalog/*.jar,/usr/lib/hive-hcatalog/share/webhcat/java-client/*.jar"
+
+if (len(hive_jar_files) != 0):
+    catalina_properties_common_loader = hive_jar_files + "," + catalina_properties_common_loader
+
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
@@ -53,7 +109,6 @@ oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
 oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
 
 java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
@@ -64,24 +119,16 @@ oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
 oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
 oozie_env_sh_template = config['configurations']['oozie-env']['content']
-oozie_lib_dir = "/var/lib/oozie/"
-oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
-oozie_shared_lib = "/usr/lib/oozie/share"
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 
-if str(hdp_stack_version).startswith('2.0') or str(hdp_stack_version).startswith('2.1'):
-  put_shared_lib_to_hdfs_cmd = format("hadoop dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
-# for newer
-else:
-  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
   
 jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
 
 if jdbc_driver_name == "com.mysql.jdbc.Driver":
   jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
 elif jdbc_driver_name == "org.postgresql.Driver":
-  jdbc_driver_jar = "/usr/lib/oozie/libserver/postgresql-9.0-801.jdbc4.jar"
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")
 elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
   jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
 else:
@@ -91,7 +138,6 @@ hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
 has_falcon_host = not len(falcon_host)  == 0
-falcon_home = '/usr/lib/falcon'
 
 #oozie-log4j.properties
 if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
@@ -103,7 +149,6 @@ oozie_hdfs_user_dir = format("/user/{oozie_user}")
 oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -117,5 +162,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
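The catalina_properties_common_loader value above is built from every regular hive-*.jar in hive_lib_dir. The same comma-separated list can be produced more compactly with glob; a sketch, not the code above:

import glob
import os

def hive_jar_list(hive_lib_dir="/usr/lib/hive/lib"):
    # Comma-separated list of the regular hive-*.jar files, skipping symlinks.
    jars = [path for path in sorted(glob.glob(os.path.join(hive_lib_dir, "hive-*.jar")))
            if os.path.isfile(path) and not os.path.islink(path)]
    return ",".join(jars)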

+ 3 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/service_check.py

@@ -43,12 +43,13 @@ def oozie_smoke_shell_file(
   os_family = System.get_instance().os_family
   
   if params.security_enabled:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
   else:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
+    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
 
   Execute( format("{tmp_dir}/{file_name}"),
     command   = sh_cmd,
+    path      = params.execute_path,
     tries     = 3,
     try_sleep = 5,
     logoutput = True

+ 81 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/templates/catalina.properties.j2

@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# List of comma-separated packages that start with or equal this string
+# will cause a security exception to be thrown when
+# passed to checkPackageAccess unless the
+# corresponding RuntimePermission ("accessClassInPackage."+package) has
+# been granted.
+package.access=sun.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper.,sun.beans.
+#
+# List of comma-separated packages that start with or equal this string
+# will cause a security exception to be thrown when
+# passed to checkPackageDefinition unless the
+# corresponding RuntimePermission ("defineClassInPackage."+package) has
+# been granted.
+#
+# by default, no packages are restricted for definition, and none of
+# the class loaders supplied with the JDK call checkPackageDefinition.
+#
+package.definition=sun.,java.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper.
+
+#
+#
+# List of comma-separated paths defining the contents of the "common"
+# classloader. Prefixes should be used to define what is the repository type.
+# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
+# If left as blank,the JVM system loader will be used as Catalina's "common"
+# loader.
+# Examples:
+#     "foo": Add this folder as a class repository
+#     "foo/*.jar": Add all the JARs of the specified folder as class
+#                  repositories
+#     "foo/bar.jar": Add bar.jar as a class repository
+common.loader=/var/lib/oozie/*.jar,/usr/lib/hadoop/client/*.jar,{{catalina_properties_common_loader}},/usr/lib/oozie/libserver/*.jar,${catalina.home}/lib,${catalina.home}/lib/*.jar
+
+#
+# List of comma-separated paths defining the contents of the "server"
+# classloader. Prefixes should be used to define what is the repository type.
+# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
+# If left as blank, the "common" loader will be used as Catalina's "server"
+# loader.
+# Examples:
+#     "foo": Add this folder as a class repository
+#     "foo/*.jar": Add all the JARs of the specified folder as class
+#                  repositories
+#     "foo/bar.jar": Add bar.jar as a class repository
+server.loader=
+
+#
+# List of comma-separated paths defining the contents of the "shared"
+# classloader. Prefixes should be used to define what is the repository type.
+# Path may be relative to the CATALINA_BASE path or absolute. If left as blank,
+# the "common" loader will be used as Catalina's "shared" loader.
+# Examples:
+#     "foo": Add this folder as a class repository
+#     "foo/*.jar": Add all the JARs of the specified folder as class
+#                  repositories
+#     "foo/bar.jar": Add bar.jar as a class repository
+# Please note that for single jars, e.g. bar.jar, you need the URL form
+# starting with file:.
+shared.loader=
+
+#
+# String cache configuration.
+tomcat.util.buf.StringCache.byte.enabled=true
+#tomcat.util.buf.StringCache.char.enabled=true
+#tomcat.util.buf.StringCache.trainThreshold=500000
+#tomcat.util.buf.StringCache.cacheSize=5000

+ 1 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2

@@ -51,7 +51,7 @@ log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
 log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
 log4j.appender.oozie.Append=true
 log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
 
 log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd

+ 14 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/params.py

@@ -25,8 +25,21 @@ from resource_management import *
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-pig_conf_dir = "/etc/pig/conf"
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+  hadoop_home = '/usr/bigtop/current/hadoop-client'
+  pig_bin_dir = '/usr/bigtop/current/pig-client/bin'
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  pig_bin_dir = ""
+
 hadoop_conf_dir = "/etc/hadoop/conf"
+pig_conf_dir = "/etc/pig/conf"
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
@@ -38,7 +51,6 @@ pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
 
 pig_properties = config['configurations']['pig-properties']['content']
 

+ 1 - 0
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/pig.py

@@ -26,6 +26,7 @@ def pig():
   import params
 
   Directory( params.pig_conf_dir,
+    recursive = True,
     owner = params.hdfs_user,
     group = params.user_group
   )

+ 6 - 4
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/PIG/package/scripts/service_check.py

@@ -31,7 +31,7 @@ class PigServiceCheck(Script):
 
     cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
     #cleanup put below to handle retries; if retrying there wil be a stale file that needs cleanup; exit code is fn of second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
     test_cmd = format("fs -test -e {output_file}")
 
     ExecuteHadoop( create_file_cmd,
@@ -42,7 +42,8 @@ class PigServiceCheck(Script):
       # for kinit run
       keytab = params.smoke_user_keytab,
       security_enabled = params.security_enabled,
-      kinit_path_local = params.kinit_path_local
+      kinit_path_local = params.kinit_path_local,
+      bin_dir = params.hadoop_bin_dir
     )
 
     File( format("{tmp_dir}/pigSmoke.sh"),
@@ -53,13 +54,14 @@ class PigServiceCheck(Script):
     Execute( format("pig {tmp_dir}/pigSmoke.sh"),
       tries     = 3,
       try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
       user      = params.smokeuser
     )
 
     ExecuteHadoop( test_cmd,
       user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
+      conf_dir = params.hadoop_conf_dir,
+      bin_dir = params.hadoop_bin_dir
     )
 
 if __name__ == "__main__":

+ 0 - 54
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/configuration/sqoop-env.xml

@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- sqoop-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for sqoop-env.sh file</description>
-    <value>
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path for where zookeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
-    </value>
-  </property>
-  <property>
-    <name>sqoop_user</name>
-    <description>User to run Sqoop as</description>
-    <property-type>USER</property-type>
-    <value>sqoop</value>
-  </property>
-</configuration>

+ 0 - 92
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/metainfo.xml

@@ -1,92 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <displayName>Sqoop</displayName>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.4.2.0</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <displayName>Sqoop</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>sqoop-env.sh</fileName>
-              <dictionaryName>sqoop-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>sqoop-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 0 - 19
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/__init__.py

@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

+ 0 - 37
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/params.py

@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
-
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
-sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
-
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

+ 0 - 37
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/service_check.py

@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
-    Execute("sqoop version",
-            user = params.smokeuser,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()

+ 0 - 57
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/SQOOP/package/scripts/sqoop.py

@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  ) 
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group
-  )
-  
-  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
-    owner=params.sqoop_user,
-    content=InlineTemplate(params.sqoop_env_sh_template)
-  )
-  
-  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
-          owner = params.sqoop_user,
-          group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  pass
-
-def sqoop_TemplateConfig(name, tag=None):
-  import params
-  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
-                  owner = params.sqoop_user,
-                  template_tag = tag
-  )

+ 0 - 107
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/metainfo.xml

@@ -1,107 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <displayName>WebHCat</displayName>
-      <comment>Provides a REST-like web API for HCatalog and related Hadoop components.</comment>
-      <version>0.13.0.689</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive-hcatalog</name>
-            </package>
-            <package>
-              <name>hive-webhcat</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HIVE</service>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 0 - 20
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py

@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

+ 0 - 83
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py

@@ -1,83 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-if str(config['hostLevelParams']['stack_version']).startswith('2.0'):
-  config_dir = '/etc/hcatalog/conf'
-  webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-# for newer versions
-else:
-  config_dir = '/etc/hive-webhcat/conf'
-  webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr/lib/hadoop'
-user_group = config['configurations']['cluster-env']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-webhcat_apps_dir = "/apps/webhcat"
-#for create_hdfs_directory
-hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
-security_param = "true" if security_enabled else "false"
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
-)

+ 0 - 45
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py

@@ -1,45 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    File(format("{tmp_dir}/templetonSmoke.sh"),
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_param} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()

+ 0 - 26
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py

@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

+ 0 - 6
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml

@@ -170,12 +170,6 @@
     <description>Virtual memory for single Reduce task</description>
   </property>
 
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <value>/etc/security/keytabs/jhs.service.keytab</value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
   <property>
     <name>mapreduce.shuffle.port</name>
     <value>13562</value>

+ 9 - 1
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml

@@ -15,7 +15,7 @@
    limitations under the License.
 -->
 
-<configuration supports_final="false">
+<configuration supports_final="false" supports_adding_forbidden="true">
 
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>
@@ -120,5 +120,13 @@
     </description>
   </property>
 
+  <property>
+    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>
+      Default minimum queue resource limit depends on the number of users who have submitted applications.
+    </description>
+  </property>
+
 
 </configuration>

Some files were not shown because too many files changed in this diff.