
AMBARI-7257 Use Versioned RPMS for HDP 2.2 stack and make it pluggable to be able to reuse the scripts for HDP 2.* (dsen)

Dmytro Sen 10 years ago
parent
commit
7d9feb6afa
86 changed files with 1636 additions and 196 deletions
  1. 8 1
      ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py
  2. 12 4
      ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
  3. 1 0
      ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py
  4. 1 0
      ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py
  5. 15 5
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  6. 2 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
  7. 3 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
  8. 19 6
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  9. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
  10. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
  11. 12 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
  12. 2 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
  13. 29 8
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
  14. 3 3
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
  15. 3 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
  16. 9 6
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
  17. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
  18. 24 10
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
  19. 20 7
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
  20. 6 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
  21. 6 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
  22. 2 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
  23. 6 3
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
  24. 4 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
  25. 49 24
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
  26. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
  27. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
  28. 4 4
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
  29. 2 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
  30. 18 6
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
  31. 17 3
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
  32. 6 4
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
  33. 9 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
  34. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml
  35. 30 11
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
  36. 7 4
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
  37. 31 14
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
  38. 3 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
  39. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
  40. 2 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
  41. 7 7
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
  42. 13 4
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
  43. 13 2
      ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
  44. 2 3
      ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
  45. 23 0
      ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml
  46. 82 0
      ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
  47. 88 0
      ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
  48. 28 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml
  49. 40 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml
  50. 42 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml
  51. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
  52. 34 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml
  53. 68 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml
  54. 44 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
  55. 38 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml
  56. 28 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml
  57. 41 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml
  58. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml
  59. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml
  60. 54 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
  61. 29 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml
  62. 40 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml
  63. 59 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml
  64. 44 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml
  65. 36 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
  66. 35 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
  67. 71 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml
  68. 40 0
      ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml
  69. 15 3
      ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
  70. 6 0
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  71. 6 0
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
  72. 5 5
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
  73. 25 11
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  74. 15 3
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
  75. 8 2
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
  76. 8 0
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
  77. 18 0
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  78. 7 0
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
  79. 6 4
      ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
  80. 8 4
      ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
  81. 12 0
      ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py
  82. 12 0
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
  83. 12 0
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
  84. 6 3
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
  85. 2 0
      ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
  86. 7 0
      ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py

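The change itself follows one pattern, repeated across the params.py files shown below: when the configuration provides hadoop-env/rpm_version, every tool and configuration path is rooted under the versioned /usr/hdp/<version>/ prefix that the versioned RPMs install to; otherwise the scripts fall back to the classic unversioned locations, so the same scripts serve HDP 2.2 and earlier HDP 2.* stacks. A minimal sketch of the pattern in plain Python (the real scripts use resource_management's default() and format() helpers; the values here are illustrative):

# Sketch of the versioned-RPM path selection used throughout this commit.
rpm_version = None  # in the real scripts: default("/configurations/hadoop-env/rpm_version", None)

if rpm_version is not None:
  # Versioned RPM layout (HDP 2.2+): binaries and configs live under /usr/hdp/<version>/
  hadoop_conf_dir = "/usr/hdp/%s/etc/hadoop/conf" % rpm_version
  hadoop_bin_dir = "/usr/hdp/%s/hadoop/bin" % rpm_version
else:
  # Unversioned layout used by earlier HDP 2.* stacks
  hadoop_conf_dir = "/etc/hadoop/conf"
  hadoop_bin_dir = "/usr/bin"

# Commands are then built from these parameters instead of hard-coded paths,
# e.g. "hadoop --config {hadoop_conf_dir} ..." with hadoop_bin_dir appended to PATH.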
+ 8 - 1
ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py

@@ -19,6 +19,7 @@ limitations under the License.
 Ambari Agent
 
 """
+import os
 
 from resource_management import *
 
@@ -27,6 +28,7 @@ class ExecuteHadoopProvider(Provider):
     kinit__path_local = self.resource.kinit_path_local
     keytab = self.resource.keytab
     conf_dir = self.resource.conf_dir
+    bin_dir = self.resource.bin_dir
     command = self.resource.command
     principal = self.resource.principal
 
@@ -39,10 +41,15 @@ class ExecuteHadoopProvider(Provider):
           path = ['/bin'],
           user = self.resource.user
         )
-    
+
+      path = os.environ['PATH']
+      if bin_dir is not None:
+        path += os.pathsep + bin_dir
+
       Execute (format("hadoop --config {conf_dir} {command}"),
         user        = self.resource.user,
         tries       = self.resource.tries,
         try_sleep   = self.resource.try_sleep,
         logoutput   = self.resource.logoutput,
+        environment = {'PATH' : path}
       )

+ 12 - 4
ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py

@@ -19,6 +19,7 @@ limitations under the License.
 Ambari Agent
 
 """
+import os
 
 from resource_management import *
 directories_list = [] #direcotries list for mkdir
@@ -68,6 +69,7 @@ class HdfsDirectoryProvider(Provider):
     secured = self.resource.security_enabled
     keytab_file = self.resource.keytab
     kinit_path = self.resource.kinit_path_local
+    bin_dir = self.resource.bin_dir
 
     chmod_commands = []
     chown_commands = []
@@ -76,7 +78,7 @@ class HdfsDirectoryProvider(Provider):
       mode = chmod_key[0]
       recursive = chmod_key[1]
       chmod_dirs_str = ' '.join(chmod_dirs)
-      chmod_commands.append(format("hadoop fs -chmod {recursive} {mode} {chmod_dirs_str}"))
+      chmod_commands.append(format("hadoop --config {hdp_conf_dir} fs -chmod {recursive} {mode} {chmod_dirs_str}"))
 
     for chown_key, chown_dirs in chown_map.items():
       owner = chown_key[0]
@@ -87,7 +89,7 @@ class HdfsDirectoryProvider(Provider):
         chown = owner
         if group:
           chown = format("{owner}:{group}")
-        chown_commands.append(format("hadoop fs -chown {recursive} {chown} {chown_dirs_str}"))
+        chown_commands.append(format("hadoop --config {hdp_conf_dir} fs -chown {recursive} {chown} {chown_dirs_str}"))
 
     if secured:
         Execute(format("{kinit_path} -kt {keytab_file} {hdfs_principal_name}"),
@@ -97,11 +99,17 @@ class HdfsDirectoryProvider(Provider):
     #for hadoop 2 we need to specify -p to create directories recursively
     parent_flag = '`rpm -q hadoop | grep -q "hadoop-1" || echo "-p"`'
 
-    Execute(format('hadoop fs -mkdir {parent_flag} {dir_list_str} && {chmod_cmd} && {chown_cmd}',
+    path = os.environ['PATH']
+    if bin_dir is not None:
+      path += os.pathsep + bin_dir
+
+    Execute(format('hadoop --config {hdp_conf_dir} fs -mkdir {parent_flag} {dir_list_str} && {chmod_cmd} && {chown_cmd}',
                    chmod_cmd=' && '.join(chmod_commands),
                    chown_cmd=' && '.join(chown_commands)),
             user=hdp_hdfs_user,
-            not_if=format("su - {hdp_hdfs_user} -c 'hadoop fs -ls {dir_list_str}'")
+            environment = {'PATH' : path},
+            not_if=format("su - {hdp_hdfs_user} -c 'export PATH=$PATH:{bin_dir} ; "
+                          "hadoop --config {hdp_conf_dir} fs -ls {dir_list_str}'")
     )
 
     directories_list[:] = []

+ 1 - 0
ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py

@@ -32,6 +32,7 @@ class ExecuteHadoop(Resource):
   user = ResourceArgument()
   logoutput = BooleanArgument(default=False)
   principal = ResourceArgument(default=lambda obj: obj.user)
+  bin_dir = ResourceArgument() # appended to $PATH
 
   conf_dir = ResourceArgument()
 

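The new bin_dir argument lets callers point ExecuteHadoop at the versioned hadoop binaries; the provider above appends it to PATH before running "hadoop --config {conf_dir} {command}". A hedged usage sketch (the argument values are illustrative; the call shape mirrors the service-check scripts changed below):

ExecuteHadoop('fs -ls /tmp',
              user = 'hdfs',
              conf_dir = '/usr/hdp/2.2.0.0/etc/hadoop/conf',  # illustrative versioned path
              bin_dir = '/usr/hdp/2.2.0.0/hadoop/bin',        # appended to $PATH by the provider
              tries = 3,
              try_sleep = 5,
              logoutput = True)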
+ 1 - 0
ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py

@@ -38,6 +38,7 @@ class HdfsDirectory(Resource):
   keytab = ResourceArgument()
   kinit_path_local = ResourceArgument()
   hdfs_user = ResourceArgument()
+  bin_dir = ResourceArgument(default="")
 
   #action 'create' immediately creates all pending directory in efficient manner
   #action 'create_delayed' add directory to list of pending directories

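In the stack params.py files the shared keyword arguments for HdfsDirectory are bound once with functools.partial, so individual service scripts only pass the directory attributes; bin_dir now joins that binding, as the HBASE and HDFS params.py hunks below show. A sketch of the pattern (leading shared arguments elided):

import functools
HdfsDirectory = functools.partial(
  HdfsDirectory,
  # ... other shared arguments such as conf_dir elided ...
  hdfs_user = hdfs_user,
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  bin_dir = hadoop_bin_dir  # new: versioned hadoop bin directory
)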
+ 15 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -19,17 +19,29 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_conf_empty_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf.empty")
+  mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+  hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params
 java_home = config['hostLevelParams']['java_home']
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -56,8 +68,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #users and groups

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py

@@ -19,7 +19,6 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.core.system import System
-import os
 import json
 import collections
 
@@ -38,6 +37,8 @@ user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
 #hosts
 hostname = config["hostname"]
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

+ 3 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh

@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export mark_dir=$1
 shift
 export name_dirs=$*
@@ -50,6 +52,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
+    export PATH=$PATH:$bin_dir
     su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

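Callers of this hook script must now pass the hadoop bin directory as the third positional argument, between the conf dir and the mark dir. A hypothetical invocation from a wrapping script, mirroring how the HDFS variant further below is called from hdfs_namenode.py (names and paths are placeholders):

Execute(format(
  "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
  "{hadoop_bin_dir} {mark_dir} {dfs_name_dir}"),
  path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
)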
+ 19 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -23,6 +23,25 @@ import os
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+  hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+  hadoop_lib_home = format("/usr/hdp/{rpm_version}/hadoop/lib")
+  hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+  hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_home = '/usr'
+
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
@@ -72,11 +91,7 @@ if has_ganglia_server:
 
 if has_namenode:
   hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 
@@ -127,8 +142,6 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #log4j.properties

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py

@@ -63,7 +63,7 @@ def flume(action = None):
       _set_desired_state('STARTED')
 
     flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
-      '/usr/bin/flume-ng agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
+      '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
 
     for agent in cmd_target_names():
       flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py

@@ -31,7 +31,7 @@ class FlumeServiceCheck(Script):
       Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
       Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
               user=params.smoke_user)
               user=params.smoke_user)
 
 
-    Execute(format('env JAVA_HOME={java_home} /usr/bin/flume-ng version'),
+    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
             logoutput=True,
             logoutput=True,
             tries = 3,
             tries = 3,
             try_sleep = 20)
             try_sleep = 20)

+ 12 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py

@@ -26,9 +26,19 @@ proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 security_enabled = False
 
-java_home = config['hostLevelParams']['java_home']
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  flume_conf_dir = format('/usr/hdp/{rpm_version}/etc/flume/conf')
+  flume_bin = format('/usr/hdp/{rpm_version}/flume/bin/flume-ng')
 
-flume_conf_dir = '/etc/flume/conf'
+else:
+  flume_conf_dir = '/etc/flume/conf'
+  flume_bin = '/usr/bin/flume-ng'
+
+java_home = config['hostLevelParams']['java_home']
 flume_log_dir = '/var/log/flume'
 flume_run_dir = '/var/run/flume'
 flume_user = 'flume'

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh

@@ -21,7 +21,8 @@
 #
 conf_dir=$1
 data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
 cat /tmp/hbase_chk_verify
 echo "Looking for $data"
 grep -q $data /tmp/hbase_chk_verify

+ 29 - 8
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py

@@ -26,11 +26,32 @@ import status_params
 config = Script.get_config()
 exec_tmp_dir = Script.get_tmp_dir()
 
-hbase_conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+#RPM versioning support
+  rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  hbase_conf_dir = format('/usr/hdp/{rpm_version}/etc/hbase/conf')
+  daemon_script = format('/usr/hdp/{rpm_version}/hbase/bin/hbase-daemon.sh')
+  region_mover = format('/usr/hdp/{rpm_version}/hbase/bin/region_mover.rb')
+  region_drainer = format('/usr/hdp/{rpm_version}/hbase/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/hdp/{rpm_version}/hbase/bin/hbase')
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_bin_dir = "/usr/bin"
+  hbase_conf_dir = "/etc/hbase/conf"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 hbase_included_hosts = config['commandParams']['included_hosts']
@@ -72,7 +93,7 @@ if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
-  
+
 smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
@@ -105,7 +126,6 @@ hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -119,5 +139,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py

@@ -44,7 +44,7 @@ class HbaseServiceCheck(Script):
 
     if params.security_enabled:    
       hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
-      grantprivelegecmd = format("{kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
 
       File( hbase_grant_premissions_file,
         owner   = params.hbase_user,
@@ -57,8 +57,8 @@ class HbaseServiceCheck(Script):
         user = params.hbase_user,
       )
 
-    servicecheckcmd = format("{smokeuser_kinit_cmd} hbase --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data}")
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
 
     Execute( servicecheckcmd,
       tries     = 3,

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh

@@ -24,6 +24,8 @@ export hdfs_user=$1
 shift
 export conf_dir=$1
 shift
+export bin_dir=$1
+shift
 export old_mark_dir=$1
 shift
 export mark_dir=$1
@@ -56,7 +58,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    su - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
   fi

+ 9 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py

@@ -45,11 +45,11 @@ def namenode(action=None, do_format=True):
       create_log_dir=True
     )
     if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
+      dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
     else:
       dfs_check_nn_status_cmd = None
 
-    namenode_safe_mode_off = format("su - {hdfs_user} -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+    namenode_safe_mode_off = format("su - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
 
     if params.security_enabled:
       Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
@@ -110,14 +110,16 @@ def format_namenode(force=None):
   if not params.dfs_ha_enabled:
     if force:
       ExecuteHadoop('namenode -format',
-                    kinit_override=True)
+                    kinit_override=True,
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
     else:
       File(format("{tmp_dir}/checkForFormat.sh"),
            content=StaticFile("checkForFormat.sh"),
           mode=0755)
       Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {old_mark_dir} "
-        "{mark_dir} {dfs_name_dir}"),
+        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
               not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
       )
@@ -154,4 +156,5 @@ def decommission():
   ExecuteHadoop(nn_refresh_cmd,
                 user=hdfs_user,
                 conf_dir=conf_dir,
-                kinit_override=True)
+                kinit_override=True,
+                bin_dir=params.hadoop_bin_dir)

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py

@@ -88,7 +88,7 @@ class NameNode(Script):
 
 
     def startRebalancingProcess(threshold):
-      rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      rebalanceCommand = format('export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
       return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
 
     command = startRebalancingProcess(threshold)

+ 24 - 10
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -24,6 +24,28 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_conf_empty_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf.empty")
+  mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+  hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+  hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  limits_conf_dir = format("/usr/hdp/{rpm_version}/etc/security/limits.d")
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+  limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
@@ -100,9 +122,7 @@ proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 nagios_group = config['configurations']['nagios-env']['nagios_group']
 
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -110,8 +130,6 @@ hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'
 dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
 
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
 jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
 
 dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
@@ -171,11 +189,10 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
 
-limits_conf_dir = "/etc/security/limits.d"
-
 io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
 if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
@@ -184,8 +201,6 @@ else:
 name_node_params = default("/commandParams/namenode", None)
 
 #hadoop params
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
 hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
@@ -209,5 +224,4 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")

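The new execute_path value (the agent's PATH with the versioned hadoop bin directory appended) is what the service scripts pass as the PATH environment of plain Execute calls, so child processes resolve the right binaries without hard-coded locations; the HIVE scripts below do the same with hive_bin. A minimal sketch of that pattern (command and user are illustrative):

Execute(format("hadoop --config {hadoop_conf_dir} fs -ls /"),
        user = smoke_user,
        environment = {'PATH' : execute_path},  # PATH extended with hadoop_bin_dir
        logoutput = True)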
+ 20 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py

@@ -31,13 +31,14 @@ class HdfsServiceCheck(Script):
 
     safemode_command = "dfsadmin -safemode get | grep OFF"
 
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
+    create_dir_cmd = format("fs -mkdir {dir}")
+    chmod_command = format("fs -chmod 777 {dir}")
+    test_dir_exists = format("hadoop --config {hadoop_conf_dir} fs -test -e {dir}")
     cleanup_cmd = format("fs -rm {tmp_file}")
     #cleanup put below to handle retries; if retrying there wil be a stale file
     #that needs cleanup; exit code is fn of second command
     create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
     test_cmd = format("fs -test -e {tmp_file}")
     if params.security_enabled:
       Execute(format(
@@ -48,7 +49,8 @@ class HdfsServiceCheck(Script):
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=20
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_dir_cmd,
                   user=params.smoke_user,
@@ -56,21 +58,32 @@ class HdfsServiceCheck(Script):
                   not_if=test_dir_exists,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(chmod_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_file_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(test_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     if params.has_journalnode_hosts:
       journalnode_port = params.journalnode_port

+ 6 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py

@@ -25,6 +25,12 @@ import sys
 def hcat():
   import params
 
+  Directory(params.hive_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
   Directory(params.hcat_conf_dir,
             owner=params.hcat_user,
             group=params.user_group,

+ 6 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py

@@ -45,6 +45,7 @@ def hcat_service_check():
             user=params.smokeuser,
             try_sleep=5,
             path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
+            environment = {'PATH' : params.execute_path},
             logoutput=True)
 
     if params.security_enabled:
@@ -55,7 +56,8 @@ def hcat_service_check():
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
                     keytab=params.hdfs_user_keytab,
-                    principal=params.hdfs_principal_name
+                    principal=params.hdfs_principal_name,
+                    bin_dir=params.hive_bin
       )
     else:
       ExecuteHadoop(test_cmd,
@@ -64,7 +66,8 @@ def hcat_service_check():
                     conf_dir=params.hadoop_conf_dir,
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab
+                    keytab=params.hdfs_user_keytab,
+                    bin_dir=params.hive_bin
      )
 
     cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
@@ -72,6 +75,7 @@ def hcat_service_check():
     Execute(cleanup_cmd,
             tries=3,
             user=params.smokeuser,
+            environment = {'PATH' : params.execute_path },
             try_sleep=5,
             path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
             logoutput=True

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py

@@ -188,6 +188,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "/usr/bin/"])
   elif params.hive_jdbc_driver == "org.postgresql.Driver":
     cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
@@ -195,6 +196,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "usr/bin/"])
 
   elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":

+ 6 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py

@@ -49,6 +49,7 @@ def hive_service(
 
     Execute(demon_cmd,
             user=params.hive_user,
+            environment= {'PATH' : params.execute_path, 'HADOOP_HOME' : params.hadoop_home },
             not_if=process_id_exists
     )
 
@@ -103,8 +104,10 @@ def hive_service(
 def check_fs_root():
   import params  
   fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
-  cmd = "/usr/lib/hive/bin/metatool -listFSRoot 2>/dev/null | grep hdfs://"
+  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
   code, out = call(cmd, user=params.hive_user)
   if code == 0 and fs_root_url.strip() != out.strip():
-    cmd = format("/usr/lib/hive/bin/metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
-    Execute(cmd, user=params.hive_user)
+    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
+    Execute(cmd,
+            environment= {'PATH' : params.execute_path },
+            user=params.hive_user)

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py

@@ -69,7 +69,8 @@ def install_tez_jars():
                     owner=params.tez_user,
                     dest_dir=app_dir_path,
                     kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
+                    hdfs_user=params.hdfs_user,
+                    hadoop_conf_dir=params.hadoop_conf_dir
       )
     pass
 
@@ -79,7 +80,8 @@ def install_tez_jars():
                     owner=params.tez_user,
                     dest_dir=lib_dir_path,
                     kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
+                    hdfs_user=params.hdfs_user,
+                    hadoop_conf_dir=params.hadoop_conf_dir
       )
     pass
 

+ 49 - 24
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py

@@ -26,6 +26,53 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+hdp_stack_version = config['hostLevelParams']['stack_version']
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+  hive_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
+  hive_client_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
+  hive_server_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf.server')
+  hive_bin = format('/usr/hdp/{rpm_version}/hive/bin')
+  hive_lib = format('/usr/hdp/{rpm_version}/hive/lib')
+  tez_local_api_jars = format('/usr/hdp/{rpm_version}/tez/tez*.jar')
+  tez_local_lib_jars = format('/usr/hdp/{rpm_version}/tez/lib/*.jar')
+
+  if str(hdp_stack_version).startswith('2.0'):
+    hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
+    hcat_lib = format('/usr/hdp/{rpm_version}/hive/hcatalog/share/hcatalog')
+  # for newer versions
+  else:
+    hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive-hcatalog/conf')
+    hcat_lib = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/share/hcatalog')
+
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  hive_conf_dir = "/etc/hive/conf"
+  hive_bin = '/usr/lib/hive/bin'
+  hive_lib = '/usr/lib/hive/lib/'
+  hive_client_conf_dir = "/etc/hive/conf"
+  hive_server_conf_dir = '/etc/hive/conf.server'
+  tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+  tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+
+  if str(hdp_stack_version).startswith('2.0'):
+    hcat_conf_dir = '/etc/hcatalog/conf'
+    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+  # for newer versions
+  else:
+    hcat_conf_dir = '/etc/hive-hcatalog/conf'
+    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin
 hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
 hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
@@ -34,7 +81,6 @@ hive_metastore_db_type = config['configurations']['hive-env']['hive_database_typ
 
 #users
 hive_user = config['configurations']['hive-env']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
 if hive_jdbc_driver == "com.mysql.jdbc.Driver":
@@ -51,11 +97,9 @@ check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
 
 #common
-hdp_stack_version = config['hostLevelParams']['stack_version']
 hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
 hive_var_lib = '/var/lib/hive'
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_bin = '/usr/lib/hive/bin'
 hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
@@ -77,8 +121,6 @@ hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 #Default conf dir for client
-hive_client_conf_dir = "/etc/hive/conf"
-hive_server_conf_dir = "/etc/hive/conf.server"
 hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
@@ -92,8 +134,6 @@ hive_database_name = config['configurations']['hive-env']['hive_database_name']
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh.j2'
 
-hadoop_home = '/usr'
-
 ##Starting metastore
 start_metastore_script = 'startMetastore.sh'
 hive_metastore_pid = status_params.hive_metastore_pid
@@ -133,15 +173,6 @@ else:
 
 ########## HCAT
 
-if str(hdp_stack_version).startswith('2.0'):
-  hcat_conf_dir = '/etc/hcatalog/conf'
-  hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-# for newer versions
-else:
-  hcat_conf_dir = '/etc/hive-hcatalog/conf'
-  hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-
-
 hcat_dbroot = hcat_lib
 
 hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -150,8 +181,6 @@ webhcat_user = config['configurations']['hive-env']['webhcat_user']
 hcat_pid_dir = status_params.hcat_pid_dir
 hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
 
-hadoop_conf_dir = '/etc/hadoop/conf'
-
 #hive-log4j.properties.template
 if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
   log4j_props = config['configurations']['hive-log4j']['content']
@@ -172,7 +201,6 @@ hive_hdfs_user_mode = 0700
 hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -180,8 +208,6 @@ kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
 tez_user = config['configurations']['tez-env']['tez_user']
 
 if System.get_instance().os_family == "ubuntu":
@@ -205,13 +231,12 @@ else:
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
 HdfsDirectory = functools.partial(
   HdfsDirectory,
   conf_dir=hadoop_conf_dir,
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2

@@ -25,5 +25,5 @@ HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.d
 HIVE_SERVER2_OPTS="${HIVE_SERVER2_OPTS} -hiveconf hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator -hiveconf hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory "
 {% endif %}
 
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
 echo $!|cat>$3

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml

@@ -122,7 +122,7 @@ export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
 # The base URL for callback URLs to Oozie
 #
 # export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
     </value>
   </property>
 

+ 4 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh

@@ -93,10 +93,10 @@ else
   kinitcmd=""
 fi
 
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
 
 cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
 echo $cmd

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py

@@ -37,7 +37,7 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
       db_connection_check_command = None
       
     cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
 
     if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
       print "ERROR: jdbc file " + params.jdbc_driver_jar + " is unavailable. Please, follow next steps:\n" \
@@ -58,7 +58,7 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
     
     Execute( cmd2,
       user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+      not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
     )
     
     Execute( start_cmd,

+ 18 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py

@@ -25,15 +25,28 @@ import status_params
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  hadoop_lib_home = format("/usr/hdp/{rpm_version}/hadoop/lib")
+  mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
 user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
 oozie_tmp_dir = "/var/tmp/oozie"
 oozie_hdfs_user_dir = format("/user/{oozie_user}")
 oozie_pid_dir = status_params.oozie_pid_dir
@@ -53,7 +66,6 @@ oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
 oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
 
 java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
@@ -71,7 +83,7 @@ oozie_shared_lib = "/usr/lib/oozie/share"
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 
 if str(hdp_stack_version).startswith('2.0') or str(hdp_stack_version).startswith('2.1'):
-  put_shared_lib_to_hdfs_cmd = format("hadoop dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
 # for newer
 else:
   put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
@@ -103,7 +115,6 @@ oozie_hdfs_user_dir = format("/user/{oozie_user}")
 oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -117,5 +128,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 17 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py

@@ -25,8 +25,23 @@ from resource_management import *
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+  pig_conf_dir = format('/usr/hdp/{rpm_version}/etc/pig/conf')
+  pig_bin_dir = format('/usr/hdp/{rpm_version}/pig/bin')
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  pig_conf_dir = "/etc/pig/conf"
+  pig_bin_dir = ""
+
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
@@ -38,7 +53,6 @@ pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
 
 pig_properties = config['configurations']['pig-properties']['content']
 

+ 6 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py

@@ -31,7 +31,7 @@ class PigServiceCheck(Script):
 
     cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
     #cleanup put below to handle retries; if retrying there wil be a stale file that needs cleanup; exit code is fn of second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
     test_cmd = format("fs -test -e {output_file}")
 
     ExecuteHadoop( create_file_cmd,
@@ -42,7 +42,8 @@ class PigServiceCheck(Script):
       # for kinit run
       keytab = params.smoke_user_keytab,
       security_enabled = params.security_enabled,
-      kinit_path_local = params.kinit_path_local
+      kinit_path_local = params.kinit_path_local,
+      bin_dir = params.hadoop_bin_dir
     )
 
     File( format("{tmp_dir}/pigSmoke.sh"),
@@ -53,13 +54,14 @@ class PigServiceCheck(Script):
     Execute( format("pig {tmp_dir}/pigSmoke.sh"),
     Execute( format("pig {tmp_dir}/pigSmoke.sh"),
       tries     = 3,
       tries     = 3,
       try_sleep = 5,
       try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
       user      = params.smokeuser
       user      = params.smokeuser
     )
     )
 
 
     ExecuteHadoop( test_cmd,
     ExecuteHadoop( test_cmd,
       user      = params.smokeuser,
       user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
+      conf_dir = params.hadoop_conf_dir,
+      bin_dir = params.hadoop_bin_dir
     )
     )
 
 
 if __name__ == "__main__":
 if __name__ == "__main__":

+ 9 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py

@@ -21,6 +21,15 @@ from resource_management import *
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  zoo_conf_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper')
+else:
+  zoo_conf_dir = "/etc/zookeeper"
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 user_group = config['configurations']['cluster-env']['user_group']
@@ -29,7 +38,6 @@ sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
 sqoop_conf_dir = "/usr/lib/sqoop/conf"
 hbase_home = "/usr"
 hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml

@@ -47,7 +47,7 @@ CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
 #HCAT_PREFIX=hive_prefix
 
 # Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop
+export HADOOP_HOME={{hadoop_home}}
     </value>
   </property>
   

+ 30 - 11
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py

@@ -26,16 +26,36 @@ import status_params
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
 
-if str(config['hostLevelParams']['stack_version']).startswith('2.0'):
-  config_dir = '/etc/hcatalog/conf'
-  webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-# for newer versions
+#hadoop params
+hdp_stack_version = config['hostLevelParams']['stack_version']
+if rpm_version is not None:
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+  hadoop_streeming_jars = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/hadoop-streaming-*.jar")
+  if str(hdp_stack_version).startswith('2.0'):
+    config_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
+    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hcatalog/sbin')
+  # for newer versions
+  else:
+    config_dir = format('/usr/hdp/{rpm_version}/etc/hive-webhcat/conf')
+    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/sbin')
 else:
-  config_dir = '/etc/hive-webhcat/conf'
-  webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+  if str(hdp_stack_version).startswith('2.0'):
+    config_dir = '/etc/hcatalog/conf'
+    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+  # for newer versions
+  else:
+    config_dir = '/etc/hive-webhcat/conf'
+    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
 templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
@@ -46,7 +66,6 @@ pid_file = status_params.pid_file
 hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
 templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
 
-hadoop_home = '/usr'
 user_group = config['configurations']['cluster-env']['user_group']
 
 webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
@@ -64,7 +83,6 @@ webhcat_hdfs_user_mode = 0755
 webhcat_apps_dir = "/apps/webhcat"
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 security_param = "true" if security_enabled else "false"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
@@ -79,5 +97,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 7 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py

@@ -84,12 +84,13 @@ def webhcat():
             path='/bin'
     )
 
-  CopyFromLocal('/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar',
+  CopyFromLocal(params.hadoop_streeming_jars,
                 owner=params.webhcat_user,
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 kinnit_if_needed=kinit_if_needed,
-                hdfs_user=params.hdfs_user
+                hdfs_user=params.hdfs_user,
+                hadoop_conf_dir=params.hadoop_conf_dir
   )
 
   CopyFromLocal('/usr/share/HDP-webhcat/pig.tar.gz',
@@ -97,7 +98,8 @@ def webhcat():
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 kinnit_if_needed=kinit_if_needed,
-                hdfs_user=params.hdfs_user
+                hdfs_user=params.hdfs_user,
+                hadoop_conf_dir=params.hadoop_conf_dir
   )
 
   CopyFromLocal('/usr/share/HDP-webhcat/hive.tar.gz',
@@ -105,5 +107,6 @@ def webhcat():
                 mode=0755,
                 dest_dir=params.webhcat_apps_dir,
                 kinnit_if_needed=kinit_if_needed,
-                hdfs_user=params.hdfs_user
+                hdfs_user=params.hdfs_user,
+                hadoop_conf_dir=params.hadoop_conf_dir
   )

+ 31 - 14
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py

@@ -18,6 +18,7 @@ limitations under the License.
 Ambari Agent
 
 """
+import os
 
 from resource_management import *
 import status_params
@@ -26,7 +27,34 @@ import status_params
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-config_dir = "/etc/hadoop/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+  hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  limits_conf_dir = format("/usr/hdp/{rpm_version}/etc/security/limits.d")
+  hadoop_yarn_home = format('/usr/hdp/{rpm_version}/hadoop-yarn')
+  hadoop_mapred2_jar_location = format('/usr/hdp/{rpm_version}/hadoop-mapreduce')
+  mapred_bin = format('/usr/hdp/{rpm_version}/hadoop-mapreduce/sbin')
+  yarn_bin = format('/usr/hdp/{rpm_version}/hadoop-yarn/sbin')
+  yarn_container_bin = format('/usr/hdp/{rpm_version}/hadoop-yarn/bin')
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+  limits_conf_dir = "/etc/security/limits.d"
+  hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+  hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+  mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+  yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+  yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 
 ulimit_cmd = "ulimit -c unlimited;"
 
@@ -49,8 +77,6 @@ rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.
 java64_home = config['hostLevelParams']['java_home']
 hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
 
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
 yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
 resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
 nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
@@ -77,8 +103,6 @@ hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory
 nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
 nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
 
-
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
 distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
 hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
@@ -90,13 +114,7 @@ yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
 mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
 yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
 
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-
 user_group = config['configurations']['cluster-env']['user_group']
-limits_conf_dir = "/etc/security/limits.d"
-hadoop_conf_dir = "/etc/hadoop/conf"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 
 #exclude file
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
@@ -128,7 +146,6 @@ jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize",
 
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -142,11 +159,11 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
-hadoop_bin = "/usr/lib/hadoop/sbin"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 
 #taskcontroller.cfg

+ 3 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py

@@ -78,10 +78,10 @@ class Resourcemanager(Script):
     env.set_params(params)
     rm_kinit_cmd = params.rm_kinit_cmd
     yarn_user = params.yarn_user
-    conf_dir = params.config_dir
+    conf_dir = params.hadoop_conf_dir
     user_group = params.user_group
 
-    yarn_refresh_cmd = format("{rm_kinit_cmd} /usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
+    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
 
     File(params.exclude_file_path,
          content=Template("exclude_hosts_list.j2"),
@@ -91,6 +91,7 @@ class Resourcemanager(Script):
 
     if params.update_exclude_file_only == False:
       Execute(yarn_refresh_cmd,
+            environment= {'PATH' : params.execute_path },
             user=yarn_user)
       pass
     pass

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py

@@ -35,7 +35,7 @@ def service(componentName, action='start', serviceName='yarn'):
     pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
     usr = params.yarn_user
 
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
 
   if action == 'start':
     daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py

@@ -27,7 +27,7 @@ class ServiceCheck(Script):
     import params
     env.set_params(params)
 
-    run_yarn_check_cmd = "/usr/bin/yarn node -list"
+    run_yarn_check_cmd = format("yarn --config {hadoop_conf_dir} node -list")
 
     component_type = 'rm'
     if params.hadoop_ssl_enabled:
@@ -60,6 +60,7 @@ class ServiceCheck(Script):
     )
 
     Execute(run_yarn_check_cmd,
+            environment= {'PATH' : params.execute_path },
             user=params.smokeuser
     )
 

+ 7 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py

@@ -78,7 +78,7 @@ def yarn(name = None):
   )
 
   XmlConfig("core-site.xml",
-            conf_dir=params.config_dir,
+            conf_dir=params.hadoop_conf_dir,
             configurations=params.config['configurations']['core-site'],
             configuration_attributes=params.config['configuration_attributes']['core-site'],
             owner=params.hdfs_user,
@@ -87,7 +87,7 @@ def yarn(name = None):
   )
 
   XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
+            conf_dir=params.hadoop_conf_dir,
             configurations=params.config['configurations']['mapred-site'],
             configuration_attributes=params.config['configuration_attributes']['mapred-site'],
             owner=params.yarn_user,
@@ -96,7 +96,7 @@ def yarn(name = None):
   )
 
   XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
+            conf_dir=params.hadoop_conf_dir,
             configurations=params.config['configurations']['yarn-site'],
             configuration_attributes=params.config['configuration_attributes']['yarn-site'],
             owner=params.yarn_user,
@@ -105,7 +105,7 @@ def yarn(name = None):
   )
 
   XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
+            conf_dir=params.hadoop_conf_dir,
             configurations=params.config['configurations']['capacity-scheduler'],
             configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
             owner=params.yarn_user,
@@ -140,7 +140,7 @@ def yarn(name = None):
        content=Template('mapreduce.conf.j2')
   )
 
-  File(format("{config_dir}/yarn-env.sh"),
+  File(format("{hadoop_conf_dir}/yarn-env.sh"),
        owner=params.yarn_user,
        group=params.user_group,
        mode=0755,
@@ -154,7 +154,7 @@ def yarn(name = None):
          mode=06050
     )
 
-    File(format("{config_dir}/container-executor.cfg"),
+    File(format("{hadoop_conf_dir}/container-executor.cfg"),
          group=params.user_group,
          mode=0644,
          content=Template('container-executor.cfg.j2')
@@ -168,7 +168,7 @@ def yarn(name = None):
     tc_mode = None
     tc_owner = params.hdfs_user
 
-  File(format("{config_dir}/mapred-env.sh"),
+  File(format("{hadoop_conf_dir}/mapred-env.sh"),
        owner=tc_owner,
        content=InlineTemplate(params.mapred_env_sh_template)
   )

+ 13 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py

@@ -26,15 +26,24 @@ import status_params
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-config_dir = "/etc/zookeeper/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  config_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper/conf')
+  zk_bin = format('/usr/hdp/{rpm_version}/zookeeper/bin')
+  smoke_script = format('/usr/hdp/{rpm_version}/zookeeper/bin/zkCli.sh')
+else:
+  config_dir = "/etc/zookeeper/conf"
+  zk_bin = '/usr/lib/zookeeper/bin'
+  smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
 zk_user =  config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
 user_group = config['configurations']['cluster-env']['user_group']
 zk_env_sh_template = config['configurations']['zookeeper-env']['content']
 
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
 zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
 zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
 zk_pid_dir = status_params.zk_pid_dir

+ 13 - 2
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py

@@ -23,6 +23,17 @@ from status_params import *
 
 config = Script.get_config()
 
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+  hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+else:
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  hadoop_bin_dir = "/usr/bin"
+
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
@@ -53,7 +64,6 @@ flacon_apps_dir = '/apps/falcon'
 #for create_hdfs_directory
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -67,5 +77,6 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )

+ 2 - 3
ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py

@@ -37,8 +37,7 @@ nimbus_host = config['configurations']['storm-site']['nimbus.host']
 rest_api_port = "8745"
 rest_api_port = "8745"
 rest_api_admin_port = "8746"
 rest_api_admin_port = "8746"
 rest_api_conf_file = format("{conf_dir}/config.yaml")
 rest_api_conf_file = format("{conf_dir}/config.yaml")
-rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
-java_home = config['hostLevelParams']['java_home']
+rest_lib_dir = default("/configurations/storm-env/rest_lib_dir","/usr/lib/storm/contrib/storm-rest")
 storm_env_sh_template = config['configurations']['storm-env']['content']
 storm_env_sh_template = config['configurations']['storm-env']['content']
 
 
 if 'ganglia_server_host' in config['clusterHostInfo'] and \
 if 'ganglia_server_host' in config['clusterHostInfo'] and \
@@ -48,7 +47,7 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \
   ganglia_report_interval = 60
 else:
   ganglia_installed = False
-  
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 if security_enabled:

+ 23 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <active>true</active>
+    </versions>
+    <extends>2.1</extends>
+</metainfo>

+ 82 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml

@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os type="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.9.9.9-98</baseurl>
+      <repoid>HDP-2.9.9.9-98</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2.0.0</repoid>
+      <reponame>HDP-2.2</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os type="redhat5">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/BUILDS/2.9.9.9-98</baseurl>
+      <repoid>HDP-2.9.9.9-98</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2.0.0</repoid>
+      <reponame>HDP-2.2</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os type="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/BUILDS/2.9.9.9-98</baseurl>
+      <repoid>HDP-2.9.9.9-98</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/updates/2.2.0.0</baseurl>
+      <repoid>HDP-2.2.0.0</repoid>
+      <reponame>HDP-2.2</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/suse11</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os type="debian12">
+    <repo>
+      <baseurl>REPLACE_WITH_UBUNTU12_URL</baseurl>
+      <repoid>HDP-2.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://dev.hortonworks.com.s3.amazonaws.com/HDP-UTILS-1.1.0.19/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.19</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

+ 88 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json

@@ -0,0 +1,88 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
+    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+        "NODEMANAGER-START", "RESOURCEMANAGER-START", "ZOOKEEPER_SERVER-START",
+        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+        "DRPC_SERVER-START"],
+    "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"],
+    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
+    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
+

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <displayName>Falcon</displayName>
+      <comment>Data management and processing platform</comment>
+      <version>0.6.0.2.2.0.0</version>
+    </service>
+  </services>
+</metainfo>

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <displayName>Flume</displayName>
+      <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
+      <version>1.5.0.1.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>flume_2_9_9_9_98</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 42 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml

@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.98.4.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase_2_9_9_9_98</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>rpm_version</name>
+    <value>2.9.9.9-98</value>
+    <description>Hadoop RPM version</description>
+  </property>
+</configuration>
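
The rpm_version property above is the switch the 2.2 stack scripts use to move from the flat /usr/lib/<component> layout to the versioned /usr/hdp/<version> layout that appears in the paths below. A rough sketch of the pattern the params.py files follow (variable names are illustrative, not the verbatim Ambari code):

    # Sketch only: derive versioned bin/conf dirs when rpm_version is present.
    from resource_management.libraries.functions.default import default
    from resource_management.libraries.functions.format import format

    rpm_version = default("/configurations/hadoop-env/rpm_version", None)  # "2.9.9.9-98" in HDP 2.2

    if rpm_version is not None:
        # versioned RPM layout introduced with HDP 2.2
        hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
        hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
    else:
        # flat layout used by HDP 2.0/2.1
        hadoop_bin_dir = "/usr/bin"
        hadoop_conf_dir = "/etc/hadoop/conf"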

+ 34 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml

@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+  </property>
+
+</configuration>

+ 68 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml

@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.6.0.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_9_9_9_98</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>hadoop_2_9_9_9_98-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+            
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 44 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml

@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.14.0.2.9.9.9</version>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>This is a comment for the HCATALOG service</comment>
+      <version>0.14.0.2.9.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive_2_9_9_9_98-hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+
+  </services>
+</metainfo>

+ 38 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml

@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; the path can also be absolute (i.e. to point
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+
+
+
+
+</configuration>

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.1.0.2.2.0.0</version>
+    </service>
+  </services>
+</metainfo>

+ 41 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml

@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <displayName>Pig</displayName>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.14.0.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>pig_2_9_9_9_98</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.5.2.2</version>
+    </service>
+  </services>
+</metainfo>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>rest_lib_dir</name>
+    <value>/usr/lib/storm/external/storm-rest</value>
+    <description></description>
+  </property>
+</configuration>

+ 54 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+
+
+  <property>
+    <name>nimbus.childopts</name>
+    <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+    <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+  </property>
+
+  <property>
+    <name>worker.childopts</name>
+    <value>-Xmx768m -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+    <description>The jvm opts provided to workers launched by this supervisor. All \"%ID%\" substrings are replaced with an identifier for this worker.</description>
+  </property>
+
+
+
+  <property>
+    <name>ui.childopts</name>
+    <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
+    <description>Childopts for Storm UI Java process.</description>
+  </property>
+
+  <property>
+    <name>supervisor.childopts</name>
+    <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+    <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+  </property>
+
+
+
+</configuration>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <displayName>Storm</displayName>
+      <comment>Apache Hadoop Stream processing framework</comment>
+      <version>0.9.3.2.2.0.0</version>
+    </service>
+  </services>
+</metainfo>

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TEZ</name>
+      <displayName>Tez</displayName>
+      <comment>Tez is the next generation Hadoop Query Processing framework written on top of YARN.</comment>
+      <version>0.6.0.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>tez_2_9_9_9_98</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 59 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml

@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/hdp/2.9.9.9-98/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/hdp/2.9.9.9-98/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/hdp/2.9.9.9-98/hadoop/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/hdp/2.9.9.9-98/hive/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+
+</configuration>

+ 44 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml

@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is a comment for the WEBHCAT service</comment>
+      <version>0.14.0.2.9.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive_2_9_9_9_98-webhcat</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 36 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml

@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/hdp/2.9.9.9-98/hadoop/lib/native/Linux-amd64-64</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+  </property>
+
+
+</configuration>

+ 35 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml

@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
+</configuration>

+ 71 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml

@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.6.0.2.9.9.9</version>
+      <components>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <cardinality>1</cardinality>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_9_9_9_98-yarn</name>
+            </package>
+            <package>
+              <name>hadoop_2_9_9_9_98-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.6.0.2.9.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_9_9_9_98-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+    </service>
+
+  </services>
+</metainfo>

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.2.9.9.9</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_2_9_9_9_98</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

+ 15 - 3
ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py

@@ -50,18 +50,29 @@ class TestServiceCheck(RMFTestCase):
         tries = 20,
         conf_dir = '/etc/hadoop/conf',
         try_sleep = 3,
+        bin_dir = '/usr/bin',
         user = 'ambari-qa',
     )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp',
+    self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp',
         conf_dir = '/etc/hadoop/conf',
+        bin_dir = '/usr/bin',
         logoutput = True,
-        not_if = 'hadoop fs -test -e /tmp',
+        not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp',
         try_sleep = 3,
         tries = 5,
         user = 'ambari-qa',
     )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/',
+    self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp',
+        conf_dir = '/etc/hadoop/conf',
+        bin_dir = '/usr/bin',
+        logoutput = True,
+        try_sleep = 3,
+        tries = 5,
+        user = 'ambari-qa',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/',
         logoutput = True,
+        bin_dir = '/usr/bin',
         tries = 5,
         conf_dir = '/etc/hadoop/conf',
         try_sleep = 3,
@@ -70,6 +81,7 @@ class TestServiceCheck(RMFTestCase):
     self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /tmp/',
         logoutput = True,
         tries = 5,
+        bin_dir = '/usr/bin',
         conf_dir = '/etc/hadoop/conf',
         try_sleep = 3,
         user = 'ambari-qa',

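The updated expectations above show the effect of the new bin_dir argument on ExecuteHadoop and the explicit '--config <conf_dir>' flag: commands no longer rely on a bare 'hadoop' being on the PATH, so the same service check works for both flat and versioned installs. A simplified sketch of the command construction (an assumption for illustration, not the exact provider code):

    # Sketch: build a hadoop command with an explicit binary dir and conf dir.
    def hadoop_command(command, conf_dir="/etc/hadoop/conf", bin_dir="/usr/bin"):
        return "{0}/hadoop --config {1} {2}".format(bin_dir, conf_dir, command)

    print(hadoop_command("fs -mkdir /tmp"))
    # /usr/bin/hadoop --config /etc/hadoop/conf fs -mkdir /tmp
    print(hadoop_command("fs -mkdir /tmp", bin_dir="/usr/hdp/2.9.9.9-98/hadoop/bin"))
    # /usr/hdp/2.9.9.9-98/hadoop/bin/hadoop --config /etc/hadoop/conf fs -mkdir /tmp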
+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -250,6 +250,7 @@ class TestHBaseMaster(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -260,6 +261,7 @@ class TestHBaseMaster(RMFTestCase):
                              kinit_path_local = "/usr/bin/kinit",
                              mode = 0711,
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', None,
@@ -268,6 +270,7 @@ class TestHBaseMaster(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )

@@ -350,6 +353,7 @@ class TestHBaseMaster(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -360,6 +364,7 @@ class TestHBaseMaster(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0711,
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', None,
@@ -368,5 +373,6 @@ class TestHBaseMaster(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py

@@ -179,6 +179,7 @@ class TestHbaseRegionServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -189,6 +190,7 @@ class TestHbaseRegionServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0711,
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', None,
@@ -197,6 +199,7 @@ class TestHbaseRegionServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )

@@ -279,6 +282,7 @@ class TestHbaseRegionServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -289,6 +293,7 @@ class TestHbaseRegionServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0711,
                              owner = 'hbase',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
    self.assertResourceCalled('HdfsDirectory', None,
@@ -297,5 +302,6 @@ class TestHbaseRegionServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )

+ 5 - 5
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py

@@ -39,13 +39,13 @@ class TestServiceCheck(RMFTestCase):
      content = Template('hbase-smoke.sh.j2'),
      mode = 0755,
    )
-    self.assertResourceCalled('Execute', ' hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
      logoutput = True,
      tries = 3,
      user = 'ambari-qa',
      try_sleep = 5,
    )
-    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ',
+    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/lib/hbase/bin/hbase',
      logoutput = True,
      tries = 3,
      user = 'ambari-qa',
@@ -74,16 +74,16 @@ class TestServiceCheck(RMFTestCase):
      group = 'hadoop',
      mode = 0644,
    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; hbase shell /tmp/hbase_grant_permissions.sh',
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase shell /tmp/hbase_grant_permissions.sh',
      user = 'hbase',
    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
      logoutput = True,
      tries = 3,
      user = 'ambari-qa',
      try_sleep = 5,
    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ',
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/lib/hbase/bin/hbase',
      logoutput = True,
      tries = 3,
      user = 'ambari-qa',

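The HBase service check now invokes the launcher through a full path ('/usr/lib/hbase/bin/hbase' for this 2.0.6 test) instead of a bare 'hbase', which lets a 2.2 stack substitute its versioned location. A small sketch of how that path could be selected (hypothetical helper, not the actual params.py):

    # Sketch: choose the hbase launcher based on whether a versioned RPM is installed.
    def hbase_cmd(rpm_version=None):
        if rpm_version:
            return "/usr/hdp/{0}/hbase/bin/hbase".format(rpm_version)
        return "/usr/lib/hbase/bin/hbase"

    print(hbase_cmd())              # /usr/lib/hbase/bin/hbase (matches the test above)
    print(hbase_cmd("2.9.9.9-98"))  # /usr/hdp/2.9.9.9-98/hbase/bin/hbase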
+ 25 - 11
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -48,7 +48,7 @@ class TestNamenode(RMFTestCase):
                               content = StaticFile('checkForFormat.sh'),
                               content = StaticFile('checkForFormat.sh'),
                               mode = 0755,
                               mode = 0755,
                               )
                               )
-    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               )
                               )
@@ -75,7 +75,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
     self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
                               )
-    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+    self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
                               tries = 40,
                               tries = 40,
                               only_if = None,
                               only_if = None,
                               try_sleep = 10,
                               try_sleep = 10,
@@ -88,6 +88,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               mode = 0777,
                               mode = 0777,
                               owner = 'hdfs',
                               owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                               action = ['create_delayed'],
                               action = ['create_delayed'],
                               )
                               )
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -98,6 +99,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               mode = 0770,
                               mode = 0770,
                               owner = 'ambari-qa',
                               owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
                               action = ['create_delayed'],
                               action = ['create_delayed'],
                               )
                               )
     self.assertResourceCalled('HdfsDirectory', None,
     self.assertResourceCalled('HdfsDirectory', None,
@@ -107,6 +109,7 @@ class TestNamenode(RMFTestCase):
                               hdfs_user = 'hdfs',
                               hdfs_user = 'hdfs',
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               action = ['create'],
                               action = ['create'],
+                              bin_dir = '/usr/bin',
                               only_if = None,
                               only_if = None,
                               )
                               )
     self.assertNoMoreResources()
     self.assertNoMoreResources()
@@ -149,7 +152,7 @@ class TestNamenode(RMFTestCase):
                               content = StaticFile('checkForFormat.sh'),
                               content = StaticFile('checkForFormat.sh'),
                               mode = 0755,
                               mode = 0755,
                               )
                               )
-    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
                               )
                               )
@@ -179,7 +182,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
                               user = 'hdfs',
                               user = 'hdfs',
                               )
                               )
-    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+    self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
                               tries = 40,
                               tries = 40,
                               only_if = None,
                               only_if = None,
                               try_sleep = 10,
                               try_sleep = 10,
@@ -192,6 +195,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               mode = 0777,
                               mode = 0777,
                               owner = 'hdfs',
                               owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                               action = ['create_delayed'],
                               action = ['create_delayed'],
                               )
                               )
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -202,6 +206,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               mode = 0770,
                               mode = 0770,
                               owner = 'ambari-qa',
                               owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
                               action = ['create_delayed'],
                               action = ['create_delayed'],
                               )
                               )
     self.assertResourceCalled('HdfsDirectory', None,
     self.assertResourceCalled('HdfsDirectory', None,
@@ -211,6 +216,7 @@ class TestNamenode(RMFTestCase):
                               hdfs_user = 'hdfs',
                               hdfs_user = 'hdfs',
                               kinit_path_local = '/usr/bin/kinit',
                               kinit_path_local = '/usr/bin/kinit',
                               action = ['create'],
                               action = ['create'],
+                              bin_dir = '/usr/bin',
                               only_if = None,
                               only_if = None,
                               )
                               )
     self.assertNoMoreResources()
     self.assertNoMoreResources()
@@ -260,9 +266,9 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                              )
-    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+    self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
                              tries = 40,
-                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
                              try_sleep = 10,
                              )
     self.assertResourceCalled('HdfsDirectory', '/tmp',
@@ -273,6 +279,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0777,
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -283,6 +290,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0770,
                              owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -292,7 +300,8 @@ class TestNamenode(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              action = ['create'],
-                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              bin_dir = '/usr/bin',
+                              only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
                              )
     self.assertNoMoreResources()

@@ -326,9 +335,9 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
                              user = 'hdfs',
                              )
-    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+    self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
                              tries = 40,
-                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
                              try_sleep = 10,
                              )
     self.assertResourceCalled('HdfsDirectory', '/tmp',
@@ -339,6 +348,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0777,
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -349,6 +359,7 @@ class TestNamenode(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0770,
                              owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -358,7 +369,8 @@ class TestNamenode(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              action = ['create'],
-                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              bin_dir = '/usr/bin',
+                              only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
                              )
     self.assertNoMoreResources()

@@ -377,6 +389,7 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -refreshNodes',
                              user = 'hdfs',
                              conf_dir = '/etc/hadoop/conf',
+                              bin_dir = '/usr/bin',
                              kinit_override = True)
     self.assertNoMoreResources()

@@ -394,7 +407,8 @@ class TestNamenode(RMFTestCase):
     self.assertResourceCalled('Execute', '', user = 'hdfs')
     self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes', 
                              user = 'hdfs', 
-                              conf_dir = '/etc/hadoop/conf', 
+                              conf_dir = '/etc/hadoop/conf',
+                              bin_dir = '/usr/bin',
                              kinit_override = True)
     self.assertNoMoreResources()    


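The NameNode assertions above illustrate the pattern this commit applies everywhere: the stack's bin directory is exported onto PATH and the Hadoop config directory is passed explicitly, so the same script works whether the binaries live in /usr/bin or in a versioned HDP 2.2 location. A minimal sketch of how such command strings can be assembled (illustrative helper only; the names and defaults are assumptions, not the real hdfs_namenode.py / params.py variables):

# Hypothetical sketch: builds the safe-mode and HA-state check commands the
# assertions above expect, with the stack bin dir on PATH and --config set.
def namenode_check_commands(hdfs_user='hdfs', bin_dir='/usr/bin',
                            conf_dir='/etc/hadoop/conf', namenode_id='nn1'):
    prefix = "su - %s -c 'export PATH=$PATH:%s ; " % (hdfs_user, bin_dir)
    safemode_check = (prefix +
        "hadoop --config %s dfsadmin -safemode get' | grep 'Safe mode is OFF'" % conf_dir)
    ha_state_check = (prefix +
        "hdfs --config %s haadmin -getServiceState %s | grep active > /dev/null'" % (conf_dir, namenode_id))
    return safemode_check, ha_state_check

if __name__ == '__main__':
    for command in namenode_check_commands():
        print(command)
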
+ 15 - 3
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py

@@ -50,20 +50,31 @@ class TestServiceCheck(RMFTestCase):
        tries = 20,
        conf_dir = '/etc/hadoop/conf',
        try_sleep = 3,
+        bin_dir = '/usr/bin',
        user = 'ambari-qa',
    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp',
+    self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp',
        conf_dir = '/etc/hadoop/conf',
        logoutput = True,
-        not_if = 'hadoop fs -test -e /tmp',
+        not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp',
        try_sleep = 3,
        tries = 5,
+        bin_dir = '/usr/bin',
        user = 'ambari-qa',
    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/',
+    self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp',
+        conf_dir = '/etc/hadoop/conf',
+        logoutput = True,
+        try_sleep = 3,
+        tries = 5,
+        bin_dir = '/usr/bin',
+        user = 'ambari-qa',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/',
        logoutput = True,
        tries = 5,
        conf_dir = '/etc/hadoop/conf',
+        bin_dir = '/usr/bin',
        try_sleep = 3,
        user = 'ambari-qa',
    )
@@ -71,6 +82,7 @@ class TestServiceCheck(RMFTestCase):
        logoutput = True,
        tries = 5,
        conf_dir = '/etc/hadoop/conf',
+        bin_dir = '/usr/bin',
        try_sleep = 3,
        user = 'ambari-qa',
    )

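The HDFS service check now issues the mkdir and the chmod as separate ExecuteHadoop calls, each carrying bin_dir, and guards the mkdir with a fully qualified "hadoop --config ... fs -test" probe. A rough sketch of the command strings involved (hypothetical helper; parameter names are not the real service_check.py variables):

# Hypothetical sketch of the smoke commands asserted above; each tuple is
# (command passed to ExecuteHadoop, optional not_if guard run as plain shell).
def hdfs_smoke_commands(tmp_dir='/tmp', conf_dir='/etc/hadoop/conf'):
    return [
        ('fs -mkdir %s' % tmp_dir,
         'hadoop --config %s fs -test -e %s' % (conf_dir, tmp_dir)),
        ('fs -chmod 777 %s' % tmp_dir, None),
    ]

if __name__ == '__main__':
    for command, guard in hdfs_smoke_commands():
        print(command, '| not_if:', guard)
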
+ 8 - 2
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py

@@ -28,7 +28,10 @@ class TestHcatClient(RMFTestCase):
                       command = "configure",
                       config_file="default.json"
    )
-
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+                              owner = 'hcat',
+                              group = 'hadoop',
+    )
     self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
      owner = 'hcat',
      group = 'hadoop',
@@ -59,7 +62,10 @@ class TestHcatClient(RMFTestCase):
                         command = "configure",
                         config_file="secured.json"
    )
-
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+                              owner = 'hcat',
+                              group = 'hadoop',
+    )
     self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
      owner = 'hcat',
      group = 'hadoop',

+ 8 - 0
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py

@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
@@ -40,6 +41,8 @@ class TestHiveMetastore(RMFTestCase):
     self.assert_configure_default()
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
        not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                       'HADOOP_HOME' : '/usr'},
        user = 'hive',
    )
     self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
@@ -78,8 +81,11 @@ class TestHiveMetastore(RMFTestCase):
    )
 
     self.assert_configure_secured()
+    self.maxDiff = None
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
        not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                       'HADOOP_HOME' : '/usr'},
        user = 'hive',
    )
     self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
@@ -196,6 +202,7 @@ class TestHiveMetastore(RMFTestCase):
    )
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
        creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
        path = ['/bin', '/usr/bin/'],
        not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
    )
@@ -322,6 +329,7 @@ class TestHiveMetastore(RMFTestCase):
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
        creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
        path = ['/bin', '/usr/bin/'],
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
        not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
    )
     self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',

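Both Hive daemons and the MySQL connector copy are now run with an explicit environment whose PATH is extended with the Hive bin directory, and the daemons additionally get HADOOP_HOME, which is what the metastore test above asserts. A small sketch of building that environment dict, using the default paths from the test configs as assumptions:

import os

# Hypothetical sketch of the environment the tests above expect to be passed
# to Execute; hive_bin and hadoop_home are the test-config defaults, not
# necessarily what params.py computes on a real cluster.
def hive_execute_environment(hive_bin='/usr/lib/hive/bin', hadoop_home='/usr',
                             include_hadoop_home=True):
    environment = {'PATH': os.environ['PATH'] + os.pathsep + hive_bin}
    if include_hadoop_home:
        # Needed by the metastore / HiveServer2 start scripts, but not by the
        # connector-jar copy command.
        environment['HADOOP_HOME'] = hadoop_home
    return environment
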
+ 18 - 0
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
@@ -53,6 +54,7 @@ class TestHiveServer(RMFTestCase):
                              keytab = UnknownConfigurationMock(),
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
+                              bin_dir = '/usr/bin',
                              kinit_path_local = "/usr/bin/kinit"
    )
 
@@ -64,6 +66,7 @@ class TestHiveServer(RMFTestCase):
                              keytab = UnknownConfigurationMock(),
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
+                              bin_dir = '/usr/bin',
                              kinit_path_local = "/usr/bin/kinit"
    )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -72,6 +75,7 @@ class TestHiveServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create']
                              )
 
@@ -80,6 +84,7 @@ class TestHiveServer(RMFTestCase):
                              owner='tez',
                              dest_dir='/apps/tez/',
                              kinnit_if_needed='',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
 
@@ -88,11 +93,14 @@ class TestHiveServer(RMFTestCase):
                              owner='tez',
                              dest_dir='/apps/tez/lib/',
                              kinnit_if_needed='',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
 
     self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server /var/log/hive',
                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+                              environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                                             'HADOOP_HOME' : '/usr'},
                              user = 'hive'
    )
 
@@ -144,6 +152,8 @@ class TestHiveServer(RMFTestCase):
     self.assert_configure_secured()
     self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server /var/log/hive',
                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+                              environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                                             'HADOOP_HOME' : '/usr'},
                              user = 'hive'
    )
 
@@ -180,6 +190,7 @@ class TestHiveServer(RMFTestCase):
        kinit_path_local = '/usr/bin/kinit',
        mode = 0777,
        owner = 'hive',
+        bin_dir = '/usr/bin',
        action = ['create_delayed'],
    )
     self.assertResourceCalled('HdfsDirectory', '/user/hive',
@@ -190,6 +201,7 @@ class TestHiveServer(RMFTestCase):
        kinit_path_local = '/usr/bin/kinit',
        mode = 0700,
        owner = 'hive',
+        bin_dir = '/usr/bin',
        action = ['create_delayed'],
    )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -198,6 +210,7 @@ class TestHiveServer(RMFTestCase):
        conf_dir = '/etc/hadoop/conf',
        hdfs_user = 'hdfs',
        kinit_path_local = '/usr/bin/kinit',
+        bin_dir = '/usr/bin',
        action = ['create'],
    )
     self.assertResourceCalled('Directory', '/etc/hive/conf.server',
@@ -295,6 +308,7 @@ class TestHiveServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
        creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
        path = ['/bin', '/usr/bin/'],
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
        not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
    )
     self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
@@ -331,6 +345,7 @@ class TestHiveServer(RMFTestCase):
        conf_dir = '/etc/hadoop/conf',
        hdfs_user = 'hdfs',
        kinit_path_local = '/usr/bin/kinit',
+        bin_dir = '/usr/bin',
        mode = 0777,
        owner = 'hive',
        action = ['create_delayed'],
@@ -342,6 +357,7 @@ class TestHiveServer(RMFTestCase):
        hdfs_user = 'hdfs',
        kinit_path_local = '/usr/bin/kinit',
        mode = 0700,
+        bin_dir = '/usr/bin',
        owner = 'hive',
        action = ['create_delayed'],
    )
@@ -350,6 +366,7 @@ class TestHiveServer(RMFTestCase):
        keytab = '/etc/security/keytabs/hdfs.headless.keytab',
        conf_dir = '/etc/hadoop/conf',
        hdfs_user = 'hdfs',
+        bin_dir = '/usr/bin',
        kinit_path_local = '/usr/bin/kinit',
        action = ['create'],
    )
@@ -448,6 +465,7 @@ class TestHiveServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
        creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
        path = ['/bin', '/usr/bin/'],
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
        not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
    )
     self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',

+ 7 - 0
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py

@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 import datetime, sys, socket
@@ -42,6 +43,7 @@ class TestServiceCheck(RMFTestCase):
                        path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
                        tries = 3,
                        user = 'ambari-qa',
+                        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
                        try_sleep = 5,
    )
     self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
@@ -50,6 +52,7 @@ class TestServiceCheck(RMFTestCase):
                        conf_dir = '/etc/hadoop/conf',
                        keytab=UnknownConfigurationMock(),
                        kinit_path_local='/usr/bin/kinit',
+                        bin_dir = '/usr/lib/hive/bin',
                        security_enabled=False
    )
     self.assertResourceCalled('Execute', ' /tmp/hcatSmoke.sh hcatsmoke cleanup',
@@ -57,6 +60,7 @@ class TestServiceCheck(RMFTestCase):
                        path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
                        tries = 3,
                        user = 'ambari-qa',
+                        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
                        try_sleep = 5,
    )
     self.assertNoMoreResources()
@@ -78,6 +82,7 @@ class TestServiceCheck(RMFTestCase):
                        path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
                        tries = 3,
                        user = 'ambari-qa',
+                        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
                        try_sleep = 5,
    )
     self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
@@ -87,6 +92,7 @@ class TestServiceCheck(RMFTestCase):
                        keytab='/etc/security/keytabs/hdfs.headless.keytab',
                        kinit_path_local='/usr/bin/kinit',
                        security_enabled=True,
+                        bin_dir = '/usr/lib/hive/bin',
                        principal='hdfs'
    )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;  /tmp/hcatSmoke.sh hcatsmoke cleanup',
@@ -94,6 +100,7 @@ class TestServiceCheck(RMFTestCase):
                        path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
                        tries = 3,
                        user = 'ambari-qa',
+                        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
                        try_sleep = 5,
    )
     self.assertNoMoreResources()

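In the HCat smoke test the hcatSmoke.sh invocations pick up the same PATH extension, and the follow-up ExecuteHadoop verification runs with bin_dir pointing at the Hive bin directory; under Kerberos a kinit is prepended first. A hedged sketch of how the smoke command might be composed (the script path and principal match the test above, but the helper itself is hypothetical):

# Hypothetical sketch: kinit_cmd is empty on non-secure clusters, or a
# "/usr/bin/kinit -kt <smokeuser keytab> ambari-qa;" prefix when secured.
def hcat_smoke_command(action, kinit_cmd=''):
    return '%s /tmp/hcatSmoke.sh hcatsmoke %s' % (kinit_cmd, action)

if __name__ == '__main__':
    print(hcat_smoke_command('cleanup'))
    print(hcat_smoke_command(
        'cleanup',
        '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;'))
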
+ 6 - 4
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py

@@ -45,8 +45,8 @@ class TestOozieServer(RMFTestCase):
        ignore_failures = True,
        user = 'oozie',
        )
-    self.assertResourceCalled('Execute', ' hadoop dfs -put /usr/lib/oozie/share /user/oozie ; hadoop dfs -chmod -R 755 /user/oozie/share',
-        not_if = " hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
+    self.assertResourceCalled('Execute', ' hadoop --config /etc/hadoop/conf dfs -put /usr/lib/oozie/share /user/oozie ; hadoop --config /etc/hadoop/conf dfs -chmod -R 755 /user/oozie/share',
+        not_if = " hadoop --config /etc/hadoop/conf dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
        user = 'oozie',
        )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-start.sh',
@@ -91,8 +91,8 @@ class TestOozieServer(RMFTestCase):
                              ignore_failures = True,
                              user = 'oozie',
                              )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop dfs -put /usr/lib/oozie/share /user/oozie ; hadoop dfs -chmod -R 755 /user/oozie/share',
-                              not_if = "/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop --config /etc/hadoop/conf dfs -put /usr/lib/oozie/share /user/oozie ; hadoop --config /etc/hadoop/conf dfs -chmod -R 755 /user/oozie/share',
+                              not_if = "/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop --config /etc/hadoop/conf dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
                              user = 'oozie',
                              )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-start.sh',
@@ -122,6 +122,7 @@ class TestOozieServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0775,
                              owner = 'oozie',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
    )
     self.assertResourceCalled('XmlConfig', 'oozie-site.xml',
@@ -224,6 +225,7 @@ class TestOozieServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0775,
                              owner = 'oozie',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('XmlConfig', 'oozie-site.xml',

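The Oozie share-lib upload above is the same idea applied to plain shell Executes: every hadoop invocation now names the config directory explicitly, with an optional kinit prefix on secure clusters. A rough sketch of assembling that command pair (hypothetical helper; the paths are the defaults from the test config):

# Hypothetical sketch of the share-lib upload command and its not_if guard,
# matching the strings asserted in the unsecured test above when kinit_cmd=''.
def oozie_sharelib_commands(conf_dir='/etc/hadoop/conf',
                            share_dir='/usr/lib/oozie/share',
                            dest_dir='/user/oozie', kinit_cmd=''):
    hadoop = 'hadoop --config %s' % conf_dir
    put_cmd = ('%s %s dfs -put %s %s ; %s dfs -chmod -R 755 %s/share'
               % (kinit_cmd, hadoop, share_dir, dest_dir, hadoop, dest_dir))
    # Only upload when the share directory is not already in HDFS.
    not_if = ("%s %s dfs -ls %s/share | awk 'BEGIN {count=0;} /share/ {count++} "
              "END {if (count > 0) {exit 0} else {exit 1}}'"
              % (kinit_cmd, hadoop, dest_dir))
    return put_cmd, not_if

if __name__ == '__main__':
    for command in oozie_sharelib_commands():
        print(command)
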
+ 8 - 4
ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py

@@ -28,13 +28,14 @@ class TestPigServiceCheck(RMFTestCase):
                       command = "service_check",
                       config_file="default.json"
    )
-    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ',
+    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',
      try_sleep = 5,
      tries = 3,
      user = 'ambari-qa',
      conf_dir = '/etc/hadoop/conf',
      security_enabled = False,
      keytab = UnknownConfigurationMock(),
+      bin_dir = '/usr/bin',
      kinit_path_local = '/usr/bin/kinit'
    )
       
@@ -44,7 +45,7 @@ class TestPigServiceCheck(RMFTestCase):
    )
       
    self.assertResourceCalled('Execute', 'pig /tmp/pigSmoke.sh',
-      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      path = [':/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
      tries = 3,
      user = 'ambari-qa',
      try_sleep = 5,
@@ -52,6 +53,7 @@ class TestPigServiceCheck(RMFTestCase):
       
    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e pigsmoke.out',
      user = 'ambari-qa',
+      bin_dir = '/usr/bin',
      conf_dir = '/etc/hadoop/conf',
    )
    self.assertNoMoreResources()
@@ -63,13 +65,14 @@ class TestPigServiceCheck(RMFTestCase):
                       config_file="secured.json"
    )
    
-    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ',
+    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',
      try_sleep = 5,
      tries = 3,
      user = 'ambari-qa',
      conf_dir = '/etc/hadoop/conf',
      security_enabled = True, 
      keytab = '/etc/security/keytabs/smokeuser.headless.keytab',
+      bin_dir = '/usr/bin',
      kinit_path_local = '/usr/bin/kinit'
    )
       
@@ -79,7 +82,7 @@ class TestPigServiceCheck(RMFTestCase):
    )
       
    self.assertResourceCalled('Execute', 'pig /tmp/pigSmoke.sh',
-      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      path = [':/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
      tries = 3,
      user = 'ambari-qa',
      try_sleep = 5,
@@ -87,6 +90,7 @@ class TestPigServiceCheck(RMFTestCase):
       
    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e pigsmoke.out',
      user = 'ambari-qa',
+      bin_dir = '/usr/bin',
      conf_dir = '/etc/hadoop/conf',
    )
    self.assertNoMoreResources()

+ 12 - 0
ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py

@@ -107,6 +107,7 @@ class TestWebHCatServer(RMFTestCase):
                              kinit_path_local = "/usr/bin/kinit",
                              mode = 0755,
                              owner = 'hcat',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/user/hcat',
@@ -117,6 +118,7 @@ class TestWebHCatServer(RMFTestCase):
                              kinit_path_local = "/usr/bin/kinit",
                              mode = 0755,
                              owner = 'hcat',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -125,6 +127,7 @@ class TestWebHCatServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/var/run/webhcat',
@@ -160,6 +163,7 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
     self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/pig.tar.gz',
@@ -167,6 +171,7 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
     self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/hive.tar.gz',
@@ -174,6 +179,7 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
 
@@ -186,6 +192,7 @@ class TestWebHCatServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0755,
                              owner = 'hcat',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/user/hcat',
@@ -196,6 +203,7 @@ class TestWebHCatServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0755,
                              owner = 'hcat',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -204,6 +212,7 @@ class TestWebHCatServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/var/run/webhcat',
@@ -243,6 +252,7 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
     self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/pig.tar.gz',
@@ -250,6 +260,7 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )
     self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/hive.tar.gz',
@@ -257,5 +268,6 @@ class TestWebHCatServer(RMFTestCase):
                              mode=0755,
                              dest_dir='/apps/webhcat',
                              kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+                              hadoop_conf_dir='/etc/hadoop/conf',
                              hdfs_user='hdfs'
    )

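CopyFromLocal now also receives hadoop_conf_dir, so the underlying copy into /apps/webhcat can be pointed at the right configuration (and, via the existing kinnit_if_needed prefix, authenticated first). A hedged sketch of the kind of command such a provider might end up running (illustrative only, not the provider's actual implementation):

# Hypothetical sketch: kinit_if_needed is '' on non-secure clusters, or a
# 'kinit -kt ...;' prefix as shown in the secured assertions above.
def copy_from_local_command(src, dest_dir, hadoop_conf_dir='/etc/hadoop/conf',
                            kinit_if_needed=''):
    return ('%s hadoop --config %s fs -copyFromLocal %s %s'
            % (kinit_if_needed, hadoop_conf_dir, src, dest_dir))

if __name__ == '__main__':
    print(copy_from_local_command('/usr/share/HDP-webhcat/pig.tar.gz', '/apps/webhcat'))
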
+ 12 - 0
ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py

@@ -124,6 +124,7 @@ class TestHistoryServer(RMFTestCase):
                              group = 'hadoop',
                              action = ['create_delayed'],
                              mode = 0777,
+                              bin_dir = '/usr/bin'
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred',
                              security_enabled = False,
@@ -132,6 +133,7 @@ class TestHistoryServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
                              owner = 'mapred',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -141,6 +143,7 @@ class TestHistoryServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -152,6 +155,7 @@ class TestHistoryServer(RMFTestCase):
                              mode = 0777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -163,6 +167,7 @@ class TestHistoryServer(RMFTestCase):
                              mode = 01777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -171,6 +176,7 @@ class TestHistoryServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
@@ -314,6 +320,7 @@ class TestHistoryServer(RMFTestCase):
                              owner = 'yarn',
                              group = 'hadoop',
                              action = ['create_delayed'],
+                              bin_dir = '/usr/bin',
                              mode = 0777,
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -323,6 +330,7 @@ class TestHistoryServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'mapred',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -332,6 +340,7 @@ class TestHistoryServer(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -343,6 +352,7 @@ class TestHistoryServer(RMFTestCase):
                              mode = 0777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -354,6 +364,7 @@ class TestHistoryServer(RMFTestCase):
                              mode = 01777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -362,6 +373,7 @@ class TestHistoryServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',

+ 12 - 0
ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py

@@ -120,6 +120,7 @@ class TestNodeManager(RMFTestCase):
                              owner = 'yarn',
                              group = 'hadoop',
                              action = ['create_delayed'],
+                              bin_dir = '/usr/bin',
                              mode = 0777,
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -129,6 +130,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
                              owner = 'mapred',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -138,6 +140,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -149,6 +152,7 @@ class TestNodeManager(RMFTestCase):
                              mode = 0777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -160,6 +164,7 @@ class TestNodeManager(RMFTestCase):
                              mode = 01777,
                              owner = 'mapred',
                              group = 'hadoop',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -168,6 +173,7 @@ class TestNodeManager(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
@@ -311,6 +317,7 @@ class TestNodeManager(RMFTestCase):
                              owner = 'yarn',
                              group = 'hadoop',
                              action = ['create_delayed'],
+                              bin_dir = '/usr/bin',
                              mode = 0777,
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -320,6 +327,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'mapred',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -329,6 +337,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -338,6 +347,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0777,
+                              bin_dir = '/usr/bin',
                              owner = 'mapred',
                              group = 'hadoop',
                              action = ['create_delayed'],
@@ -349,6 +359,7 @@ class TestNodeManager(RMFTestCase):
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 01777,
+                              bin_dir = '/usr/bin',
                              owner = 'mapred',
                              group = 'hadoop',
                              action = ['create_delayed'],
@@ -357,6 +368,7 @@ class TestNodeManager(RMFTestCase):
                              security_enabled = True,
                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
                              conf_dir = '/etc/hadoop/conf',
+                              bin_dir = '/usr/bin',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
                              action = ['create'],

+ 6 - 3
ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py

@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
@@ -41,8 +42,9 @@ class TestServiceCheck(RMFTestCase):
                          user = 'ambari-qa',
                          try_sleep = 5,
     )
-    self.assertResourceCalled('Execute', '/usr/bin/yarn node -list',
-                          user = 'ambari-qa',
+    self.assertResourceCalled('Execute', 'yarn --config /etc/hadoop/conf node -list',
+                              environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/bin"},
+                              user = 'ambari-qa',
     )
     self.assertNoMoreResources()
 
@@ -63,7 +65,8 @@ class TestServiceCheck(RMFTestCase):
                          user = 'ambari-qa',
                          try_sleep = 5,
     )
-    self.assertResourceCalled('Execute', '/usr/bin/yarn node -list',
+    self.assertResourceCalled('Execute', 'yarn --config /etc/hadoop/conf node -list',
+                          environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/bin"},
                          user = 'ambari-qa',
     )
     self.assertNoMoreResources()

+ 2 - 0
ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py

@@ -94,6 +94,7 @@ class TestFalconServer(RMFTestCase):
                              kinit_path_local = '/usr/bin/kinit',
                              mode = 0777,
                              owner = 'falcon',
+                              bin_dir = '/usr/bin',
                              action = ['create_delayed'],
                              )
     self.assertResourceCalled('HdfsDirectory', None,
@@ -102,6 +103,7 @@ class TestFalconServer(RMFTestCase):
                              conf_dir = '/etc/hadoop/conf',
                              hdfs_user = 'hdfs',
                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/bin',
                              action = ['create'],
                              )
     self.assertResourceCalled('Directory', '/hadoop/falcon',

+ 7 - 0
ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py

@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
@@ -40,6 +41,8 @@ class TestHiveMetastore(RMFTestCase):
     self.assert_configure_default()
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
                               not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+                              environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                                             'HADOOP_HOME' : '/usr'},
                              user = 'hive'
     )
 
@@ -82,6 +85,8 @@ class TestHiveMetastore(RMFTestCase):
     self.assert_configure_secured()
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
                               not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+                              environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+                                             'HADOOP_HOME' : '/usr'},
                              user = 'hive'
     )
 
@@ -175,6 +180,7 @@ class TestHiveMetastore(RMFTestCase):
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
         not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
     )
     self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
@@ -279,6 +285,7 @@ class TestHiveMetastore(RMFTestCase):
     self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
         creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
         path = ['/bin', '/usr/bin/'],
+        environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
         not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
     )
     self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',