AMBARI-10894 - Agent Changes For Supporting HDP 2.3 Configuration Directories (jonathanhurley)

Jonathan Hurley, 10 years ago
parent commit 03918cf3a6
70 changed files with 1003 additions and 650 deletions (each entry below lists additions, deletions, and the file path)
  1. 72 0
      ambari-common/src/main/python/resource_management/libraries/script/script.py
  2. 21 13
      ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
  3. 19 3
      ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/status_params.py
  4. 1 1
      ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
  5. 14 9
      ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
  6. 21 6
      ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py
  7. 12 10
      ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
  8. 4 5
      ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py
  9. 1 1
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
  10. 38 32
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
  11. 19 6
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
  12. 39 26
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  13. 13 2
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
  14. 1 1
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
  15. 58 72
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
  16. 40 6
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
  17. 13 9
      ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
  18. 2 2
      ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/status_params.py
  19. 9 7
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
  20. 13 10
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
  21. 9 3
      ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py
  22. 12 6
      ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
  23. 22 26
      ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
  24. 20 3
      ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
  25. 18 12
      ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
  26. 14 4
      ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
  27. 2 2
      ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
  28. 11 9
      ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py
  29. 2 6
      ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
  30. 28 20
      ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
  31. 2 1
      ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py
  32. 0 1
      ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py
  33. 22 12
      ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
  34. 20 16
      ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
  35. 25 6
      ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
  36. 25 12
      ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
  37. 5 9
      ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez.py
  38. 57 37
      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
  39. 6 0
      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/status_params.py
  40. 19 20
      ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
  41. 18 4
      ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
  42. 16 8
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  43. 3 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
  44. 17 12
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
  45. 16 17
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
  46. 16 11
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  47. 13 13
      ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
  48. 16 14
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  49. 14 14
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
  50. 2 2
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
  51. 29 29
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
  52. 16 10
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
  53. 1 1
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  54. 9 9
      ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
  55. 4 4
      ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
  56. 1 1
      ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
  57. 3 2
      ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
  58. 10 1
      ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
  59. 13 13
      ambari-server/src/test/python/stacks/2.1/STORM/test_storm_base.py
  60. 4 4
      ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py
  61. 1 1
      ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
  62. 2 2
      ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
  63. 7 7
      ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
  64. 6 6
      ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
  65. 4 4
      ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
  66. 4 4
      ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
  67. 12 12
      ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
  68. 10 10
      ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
  69. 2 2
      ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
  70. 5 5
      ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py

+ 72 - 0
ambari-common/src/main/python/resource_management/libraries/script/script.py

@@ -257,6 +257,78 @@ class Script(object):
     """
     return Script.tmp_dir
 
+  @staticmethod
+  def get_component_from_role(role_directory_map, default_role):
+    """
+    Gets the /usr/hdp/current/<component> component given an Ambari role,
+    such as DATANODE or HBASE_MASTER.
+    :return:  the component name, such as hbase-master
+    """
+    from resource_management.libraries.functions.default import default
+
+    command_role = default("/role", default_role)
+    if command_role in role_directory_map:
+      return role_directory_map[command_role]
+    else:
+      return role_directory_map[default_role]
+
+  @staticmethod
+  def get_stack_name():
+    """
+    Gets the name of the stack from hostLevelParams/stack_name.
+    :return: a stack name or None
+    """
+    from resource_management.libraries.functions.default import default
+    return default("/hostLevelParams/stack_name", None)
+
+  @staticmethod
+  def get_hdp_stack_version():
+    """
+    Gets the normalized version of the HDP stack in the form #.#.#.# if it is
+    present on the configurations sent.
+    :return: a normalized HDP stack version or None
+    """
+    stack_name = Script.get_stack_name()
+    if stack_name is None or stack_name.upper() != "HDP":
+      return None
+
+    config = Script.get_config()
+    if 'hostLevelParams' not in config or 'stack_version' not in config['hostLevelParams']:
+      return None
+
+    stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+    if stack_version_unformatted is None or stack_version_unformatted == '':
+      return None
+
+    return format_hdp_stack_version(stack_version_unformatted)
+
+  @staticmethod
+  def is_hdp_stack_greater_or_equal(compare_to_version):
+    """
+    Gets whether the hostLevelParams/stack_version, after being normalized,
+    is greater than or equal to the specified stack version
+    :param compare_to_version: the version to compare to
+    :return: True if the command's stack is greater than or equal to the specified version
+    """
+    hdp_stack_version = Script.get_hdp_stack_version()
+    if hdp_stack_version is None or hdp_stack_version == "":
+      return False
+
+    return compare_versions(hdp_stack_version, compare_to_version) >= 0
+
+  @staticmethod
+  def is_hdp_stack_less_than(compare_to_version):
+    """
+    Gets whether the hostLevelParams/stack_version, after being normalized,
+    is less than the specified stack version
+    :param compare_to_version: the version to compare to
+    :return: True if the command's stack is less than the specified version
+    """
+    hdp_stack_version = Script.get_hdp_stack_version()
+    if hdp_stack_version is None:
+      return False
+
+    return compare_versions(hdp_stack_version, compare_to_version) < 0
 
   def install(self, env):
     """

+ 21 - 13
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py

@@ -17,10 +17,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management import *
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+
 import status_params
 
 # server configurations
@@ -33,23 +35,29 @@ security_enabled = status_params.security_enabled
 # hdp version
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-has_secure_user_auth = True
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') == 0:
-  has_secure_user_auth = False
 
-# accumulo local directory structure
-log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
-conf_dir = status_params.conf_dir # "/etc/accumulo/conf"
-server_conf_dir = "/etc/accumulo/conf/server"
-client_script = "/usr/hdp/current/accumulo-client/bin/accumulo"
-daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")
+has_secure_user_auth = False
+if Script.is_hdp_stack_greater_or_equal("2.3"):
+  has_secure_user_auth = True
+
+# configuration directories
+conf_dir = status_params.conf_dir
+server_conf_dir = status_params.server_conf_dir
 
 # service locations
 hadoop_prefix = "/usr/hdp/current/hadoop-client"
 hadoop_bin_dir = format("{hadoop_prefix}/bin")
-hadoop_conf_dir = "/etc/hadoop/conf"
 zookeeper_home = "/usr/hdp/current/zookeeper-client"
 
+# the configuration directory for HDFS/YARN/MapR is the hadoop config
+# directory, which is symlinked by hadoop-client only
+hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
+# accumulo local directory structure
+log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
+client_script = "/usr/hdp/current/accumulo-client/bin/accumulo"
+daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")
+
 # user and status
 accumulo_user = status_params.accumulo_user
 user_group = config['configurations']['cluster-env']['user_group']

+ 19 - 3
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/status_params.py

@@ -17,17 +17,33 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
 
-from resource_management import *
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ACCUMULO_MASTER' : 'accumulo-master',
+  'ACCUMULO_MONITOR' : 'accumulo-monitor',
+  'ACCUMULO_GC' : 'accumulo-gc',
+  'ACCUMULO_TRACER' : 'accumulo-tracer',
+  'ACCUMULO_TSERVER' : 'accumulo-tablet',
+  'ACCUMULO_CLIENT' : 'accumulo-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ACCUMULO_CLIENT")
 
 config = Script.get_config()
 
-conf_dir = "/etc/accumulo/conf"
+conf_dir = format('/usr/hdp/current/{component_directory}/conf')
+server_conf_dir = format('{conf_dir}/server')
 pid_dir = config['configurations']['accumulo-env']['accumulo_pid_dir']
 accumulo_user = config['configurations']['accumulo-env']['accumulo_user']
 
 # Security related/required params
 hostname = config['hostname']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 tmp_dir = Script.get_tmp_dir()
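
For reference, a short walk-through of how the role map and paths above resolve at runtime (illustrative only; the values follow directly from the definitions in this file):

# command role ACCUMULO_MASTER   -> component_directory 'accumulo-master'
#                                   conf_dir        '/usr/hdp/current/accumulo-master/conf'
#                                   server_conf_dir '/usr/hdp/current/accumulo-master/conf/server'
# command role ACCUMULO_TSERVER  -> component_directory 'accumulo-tablet'
# no role in the command         -> falls back to ACCUMULO_CLIENT, so
#                                   conf_dir        '/usr/hdp/current/accumulo-client/conf'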

+ 1 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py

@@ -42,7 +42,7 @@ def falcon(type, action = None):
               owner=params.falcon_user,
               recursive=True
     )
-    Directory(params.falcon_conf_dir_prefix,
+    Directory(params.etc_prefix_dir,
               mode=0755,
               recursive=True
     )

+ 14 - 9
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py

@@ -16,11 +16,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import status_params
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
-from status_params import *
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
 
 config = Script.get_config()
 
@@ -31,9 +34,10 @@ version = default("/commandParams/version", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+etc_prefix_dir = "/etc/falcon"
 
 # hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
 
   # if this is a server action, then use the server binaries; smoke tests
@@ -53,12 +57,13 @@ else:
   falcon_webapp_dir = '/var/lib/falcon/webapp'
   falcon_home = '/usr/lib/falcon'
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-falcon_conf_dir_prefix = "/etc/falcon"
-falcon_conf_dir = format("{falcon_conf_dir_prefix}/conf")
+hadoop_conf_dir = status_params.hadoop_conf_dir
+falcon_conf_dir = status_params.falcon_conf_dir
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+
+server_pid_file = status_params.server_pid_file
 
 user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
@@ -93,7 +98,7 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 21 - 6
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py

@@ -16,10 +16,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management import *
 from ambari_commons import OSCheck
 
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'FALCON_SERVER' : 'falcon-server',
+  'FALCON_CLIENT' : 'falcon-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "FALCON_CLIENT")
+
 config = Script.get_config()
 
 if OSCheck.is_windows_family():
@@ -30,13 +42,16 @@ else:
   falcon_pid_dir = config['configurations']['falcon-env']['falcon_pid_dir']
   server_pid_file = format('{falcon_pid_dir}/falcon.pid')
 
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  falcon_conf_dir = "/etc/falcon/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+    falcon_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+
   # Security related/required params
   hostname = config['hostname']
   security_enabled = config['configurations']['cluster-env']['security_enabled']
-  hadoop_conf_dir = "/etc/hadoop/conf"
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
-  falcon_conf_dir_prefix = "/etc/falcon"
-  falcon_conf_dir = format("{falcon_conf_dir_prefix}/conf")
   hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
   falcon_user = config['configurations']['falcon-env']['falcon_user']

+ 12 - 10
ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py

@@ -16,11 +16,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
 from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
 
 if OSCheck.is_windows_family():
   from params_windows import *
@@ -43,15 +44,16 @@ security_enabled = False
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default parameters
+flume_bin = '/usr/bin/flume-ng'
+flume_hive_home = '/usr/lib/hive'
+flume_hcat_home = '/usr/lib/hive-hcatalog'
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   flume_bin = '/usr/hdp/current/flume-server/bin/flume-ng'
   flume_hive_home = '/usr/hdp/current/hive-metastore'
   flume_hcat_home = '/usr/hdp/current/hive-webhcat'
-else:
-  flume_bin = '/usr/bin/flume-ng'
-  flume_hive_home = '/usr/lib/hive'
-  flume_hcat_home = '/usr/lib/hive-hcatalog'
 
 java_home = config['hostLevelParams']['java_home']
 flume_log_dir = '/var/log/flume'

+ 4 - 5
ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py

@@ -16,16 +16,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-from ambari_commons import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
 
 # server configurations
 config = Script.get_config()
 
 flume_conf_dir = '/etc/flume/conf'
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  flume_conf_dir = '/usr/hdp/current/flume-server/conf'
 
 flume_user = 'flume'
 flume_group = 'flume'

+ 1 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py

@@ -46,7 +46,7 @@ def hbase(name=None):
 def hbase(name=None):
   import params
 
-  Directory( params.hbase_conf_dir_prefix,
+  Directory( params.etc_prefix_dir,
       mode=0755
   )
 

+ 38 - 32
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -17,14 +17,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import status_params
+import json
 
-from ambari_commons.constants import AMBARI_SUDO_BINARY
 from functions import calc_xmn_from_xms
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
-import status_params
-import json
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+from resource_management.libraries.functions.substitute_vars import substitute_vars
 
 # server configurations
 config = Script.get_config()
@@ -32,32 +39,43 @@ exec_tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
-
 version = default("/commandParams/version", None)
+component_directory = status_params.component_directory
+etc_prefix_dir = "/etc/hbase"
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default parameters
+hadoop_bin_dir = "/usr/bin"
+hadoop_conf_dir = "/etc/hadoop/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
   hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
   daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
   region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
   region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
   hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
-else:
-  hadoop_bin_dir = "/usr/bin"
-  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+  hbase_max_direct_memory_size  = config['configurations']['hbase-env']['hbase_max_direct_memory_size']
+
+  daemon_script=format("/usr/hdp/current/{component_directory}/bin/hbase-daemon.sh")
+  region_mover = format("/usr/hdp/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("/usr/hdp/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("/usr/hdp/current/{component_directory}/bin/hbase")
+
+
+hbase_conf_dir = status_params.hbase_conf_dir
 
 # no symlink for phoenix-server at this point
 phx_daemon_script = '/usr/hdp/current/phoenix-server/bin/queryserver.py'
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-hbase_conf_dir_prefix = "/etc/hbase"
-hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = default("/commandParams/mark_draining_only",False)
 hbase_included_hosts = config['commandParams']['included_hosts']
@@ -82,8 +100,6 @@ regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver
 regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
 regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hbase_max_direct_memory_size  = config['configurations']['hbase-env']['hbase_max_direct_memory_size']
 
 pid_dir = status_params.pid_dir
 tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
@@ -117,7 +133,7 @@ else:
 smoke_test_user = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
 smokeuser_permissions = "RWXCA"
-service_check_data = functions.get_unique_id_and_date()
+service_check_data = get_unique_id_and_date()
 user_group = config['configurations']['cluster-env']["user_group"]
 
 if security_enabled:
@@ -131,7 +147,7 @@ regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regions
 queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
 else:
@@ -165,16 +181,6 @@ HdfsDirectory = functools.partial(
   bin_dir = hadoop_bin_dir
 )
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  command_role = default("/role", "")
-  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
-    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
-
-    daemon_script=format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
-    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
-    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
-    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
-
 # ranger host
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 has_ranger_admin = not len(ranger_admin_hosts) == 0    
@@ -218,7 +224,7 @@ if has_ranger_admin:
   elif xa_audit_db_flavor.lower() == 'oracle':
     jdbc_jar_name = "ojdbc6.jar"
     jdbc_symlink_name = "oracle-jdbc-driver.jar"
-  elif nxa_audit_db_flavor.lower() == 'postgres':
+  elif xa_audit_db_flavor.lower() == 'postgres':
     jdbc_jar_name = "postgresql.jar"
     jdbc_symlink_name = "postgres-jdbc-driver.jar"
   elif xa_audit_db_flavor.lower() == 'sqlserver':

+ 19 - 6
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py

@@ -17,9 +17,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from ambari_commons.os_check import OSCheck
 
-from resource_management import *
-from ambari_commons import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HBASE_CLIENT' : 'hbase-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
 
 config = Script.get_config()
 
@@ -33,9 +46,9 @@ else:
   # Security related/required params
   hostname = config['hostname']
   security_enabled = config['configurations']['cluster-env']['security_enabled']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
 
-
-  hbase_conf_dir_prefix = "/etc/hbase"
-  hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
+  hbase_conf_dir = "/etc/hbase/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    hbase_conf_dir = format("/usr/hdp/current/{component_directory}/conf")

+ 39 - 26
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -17,23 +17,29 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.functions.default import default
-from resource_management import *
 import status_params
 import utils
 import json
 import os
-import itertools
 import re
 
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+from resource_management.libraries.functions.format_jvm_option import format_jvm_option
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
-
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
@@ -53,13 +59,27 @@ dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
 dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
 secure_dn_ports_are_in_use = False
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default parameters
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+hadoop_bin = "/usr/lib/hadoop/sbin"
+hadoop_bin_dir = "/usr/bin"
+hadoop_home = "/usr/lib/hadoop"
+hadoop_secure_dn_user = hdfs_user
+hadoop_conf_dir = "/etc/hadoop/conf"
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
   hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
   hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
   hadoop_home = "/usr/hdp/current/hadoop-client"
+
+  # the configuration directory for HDFS/YARN/MapR is the hadoop config
+  # directory, which is symlinked by hadoop-client only
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
   if not security_enabled:
     hadoop_secure_dn_user = '""'
   else:
@@ -77,18 +97,17 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
       hadoop_secure_dn_user = hdfs_user
     else:
       hadoop_secure_dn_user = '""'
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = "/usr/lib/hadoop"
-  hadoop_secure_dn_user = hdfs_user
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+
 limits_conf_dir = "/etc/security/limits.d"
 
+if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1") and not OSCheck.is_suse_family():
+  # deprecated rhel jsvc_path
+  jsvc_path = "/usr/libexec/bigtop-utils"
+else:
+  jsvc_path = "/usr/lib/bigtop-utils"
+
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 ulimit_cmd = "ulimit -c unlimited ; "
 
@@ -102,8 +121,8 @@ hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 
-klist_path_local = functions.get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
@@ -298,12 +317,6 @@ hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 java_home = config['hostLevelParams']['java_home']
 java_version = int(config['hostLevelParams']['java_version'])
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and not OSCheck.is_suse_family():
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/bigtop-utils"
-else:
-  jsvc_path = "/usr/lib/bigtop-utils"
-
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
 namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
@@ -366,7 +379,7 @@ if has_ranger_admin:
   elif xa_audit_db_flavor.lower() == 'oracle':
     jdbc_jar_name = "ojdbc6.jar"
     jdbc_symlink_name = "oracle-jdbc-driver.jar"
-  elif nxa_audit_db_flavor.lower() == 'postgres':
+  elif xa_audit_db_flavor.lower() == 'postgres':
     jdbc_jar_name = "postgresql.jar"
     jdbc_symlink_name = "postgres-jdbc-driver.jar"
   elif xa_audit_db_flavor.lower() == 'sqlserver':
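
The new jsvc_path block in this file also shows how the two helpers combine into a bounded version check; aside from the unchanged OSCheck.is_suse_family() guard, it is equivalent to the compare_versions expression removed in the hunk above (sketch, assuming hdp_stack_version normalizes to a non-empty string):

# old form (removed above):
#   hdp_stack_version != "" \
#     and compare_versions(hdp_stack_version, '2.0') >= 0 \
#     and compare_versions(hdp_stack_version, '2.1') < 0
#
# new form:
#   Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1")
#
# both select stacks in the half-open interval [2.0, 2.1), i.e. only the HDP 2.0 line.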

+ 13 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py

@@ -17,9 +17,13 @@ limitations under the License.
 
 """
 
-from resource_management import *
 from ambari_commons import OSCheck
 
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
 config = Script.get_config()
 
 if OSCheck.is_windows_family():
@@ -44,6 +48,13 @@ else:
   security_enabled = config['configurations']['cluster-env']['security_enabled']
   hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
   hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+
   hadoop_conf_dir = "/etc/hadoop/conf"
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    # the configuration directory for HDFS/YARN/MapR is the hadoop config
+    # directory, which is symlinked by hadoop-client only
+    hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
+
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()

+ 1 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py

@@ -94,7 +94,7 @@ def hive(name=None):
     setup_custom_scratchdir()
     params.HdfsDirectory(None, action="create")
 
-  Directory(params.hive_conf_dir_prefix,
+  Directory(params.hive_etc_dir_prefix,
             mode=0755
   )
 

+ 58 - 72
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py

@@ -18,15 +18,21 @@ limitations under the License.
 
 """
 
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
 import status_params
 import json
 import os
 
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -40,76 +46,61 @@ hostname = config["hostname"]
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-stack_is_hdp21 = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.1') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0
+stack_is_hdp21 = Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.2")
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
-# Hadoop params
-# TODO, this logic should initialize these parameters in a file inside the HDP 2.2 stack.
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
-  # start out with client libraries
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_home = '/usr/hdp/current/hadoop-client'
-  hive_bin = '/usr/hdp/current/hive-client/bin'
-  hive_lib = '/usr/hdp/current/hive-client/lib'
+hadoop_bin_dir = "/usr/bin"
+hadoop_home = '/usr'
+hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+hive_bin = '/usr/lib/hive/bin'
+hive_lib = '/usr/lib/hive/lib/'
+hive_var_lib = '/var/lib/hive'
+pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
+hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
+sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
+hive_specific_configs_supported = False
+hive_etc_dir_prefix = "/etc/hive"
+limits_conf_dir = "/etc/security/limits.d"
+hcat_conf_dir = '/etc/hcatalog/conf'
+config_dir = '/etc/hcatalog/conf'
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+
+# use the directories from status_params as they are already calculated for
+# the correct version of HDP
+hadoop_conf_dir = status_params.hadoop_conf_dir
+webhcat_conf_dir = status_params.webhcat_conf_dir
+hive_conf_dir = status_params.hive_conf_dir
+hive_config_dir = status_params.hive_config_dir
+hive_client_conf_dir = status_params.hive_client_conf_dir
+hive_server_conf_dir = status_params.hive_server_conf_dir
 
-  # if this is a server action, then use the server binaries; smoke tests
-  # use the client binaries
-  command_role = default("/role", "")
-  server_role_dir_mapping = { 'HIVE_SERVER' : 'hive-server2',
-    'HIVE_METASTORE' : 'hive-metastore' }
+if Script.is_hdp_stack_greater_or_equal("2.1"):
+  hcat_conf_dir = '/etc/hive-hcatalog/conf'
+  config_dir = '/etc/hive-webhcat/conf'
+  hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+  webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
 
-  if command_role in server_role_dir_mapping:
-    hive_server_root = server_role_dir_mapping[command_role]
-    hive_bin = format('/usr/hdp/current/{hive_server_root}/bin')
-    hive_lib = format('/usr/hdp/current/{hive_server_root}/lib')
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  hive_specific_configs_supported = True
+
+  component_directory = status_params.component_directory
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_home = '/usr/hdp/current/hadoop-client'
+  hive_bin = format('/usr/hdp/current/{component_directory}/bin')
+  hive_lib = format('/usr/hdp/current/{component_directory}/lib')
 
   # there are no client versions of these, use server versions directly
   hcat_lib = '/usr/hdp/current/hive-webhcat/share/hcatalog'
   webhcat_bin_dir = '/usr/hdp/current/hive-webhcat/sbin'
 
-  hive_specific_configs_supported = True
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
-  hive_bin = '/usr/lib/hive/bin'
-  hive_lib = '/usr/lib/hive/lib/'
-  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
-  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
-  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
-
-  if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-  # for newer versions
-  else:
-    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-    
-  hive_specific_configs_supported = False
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hive_conf_dir_prefix = "/etc/hive"
-hive_conf_dir = format("{hive_conf_dir_prefix}/conf")
-hive_client_conf_dir = format("{hive_conf_dir_prefix}/conf")
-hive_server_conf_dir = format("{hive_conf_dir_prefix}/conf.server")
-limits_conf_dir = "/etc/security/limits.d"
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-  hcat_conf_dir = '/etc/hcatalog/conf'
-  config_dir = '/etc/hcatalog/conf'
-# for newer versions
-else:
-  hcat_conf_dir = '/etc/hive-hcatalog/conf'
-  config_dir = '/etc/hive-webhcat/conf'
 
 execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
 hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
 hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
-webhcat_conf_dir = status_params.webhcat_conf_dir
 hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
 hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
 #HACK Temporarily use dbType=azuredb while invoking schematool
@@ -145,15 +136,16 @@ templeton_port = config['configurations']['webhcat-site']['templeton.port']
 hive_metastore_hosts = config['clusterHostInfo']['hive_metastore_host']
 hive_metastore_host = hive_metastore_hosts[0]
 hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_var_lib = '/var/lib/hive'
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_hosts = config['clusterHostInfo']['hive_server_host']
 hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+
 if hive_transport_mode.lower() == "http":
   hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
 else:
   hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
 hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
@@ -167,7 +159,7 @@ smokeuser_principal = config['configurations']['cluster-env']['smokeuser_princip
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
@@ -177,17 +169,13 @@ hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
 hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
+
 #Default conf dir for client
 hive_conf_dirs_list = [hive_client_conf_dir]
 
 if hostname in hive_metastore_hosts or hostname in hive_server_hosts:
   hive_conf_dirs_list.append(hive_server_conf_dir)
 
-if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  hive_config_dir = hive_server_conf_dir
-else:
-  hive_config_dir = hive_client_conf_dir
-
 #hive-site
 hive_database_name = config['configurations']['hive-env']['hive_database_name']
 hive_database = config['configurations']['hive-env']['hive_database']
@@ -223,7 +211,6 @@ else:
 java64_home = config['hostLevelParams']['java_home']
 
 ##### MYSQL
-
 db_name = config['configurations']['hive-env']['hive_database_name']
 mysql_group = 'mysql'
 mysql_host = config['clusterHostInfo']['hive_mysql_host']
@@ -232,13 +219,12 @@ mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
 mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
 
 ######## Metastore Schema
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-  init_metastore_schema = False
-else:
+init_metastore_schema = False
+if Script.is_hdp_stack_greater_or_equal("2.1"):
   init_metastore_schema = True
 
-########## HCAT
 
+########## HCAT
 hcat_dbroot = hcat_lib
 
 hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -353,7 +339,7 @@ HdfsDirectory = functools.partial(
 # ranger host
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 has_ranger_admin = not len(ranger_admin_hosts) == 0
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   enable_ranger_hive = (config['configurations']['ranger-hive-plugin-properties']['ranger-hive-plugin-enabled'].lower() == 'yes')
 
 #ranger hive properties

+ 40 - 6
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py

@@ -18,8 +18,24 @@ limitations under the License.
 
 """
 
-from resource_management import *
-from ambari_commons.os_check import OSCheck
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'HIVE_CLIENT' : 'hive-client',
+  'HCAT' : 'hive-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
 
 config = Script.get_config()
 
@@ -46,11 +62,29 @@ else:
   # Security related/required params
   hostname = config['hostname']
   security_enabled = config['configurations']['cluster-env']['security_enabled']
-  hadoop_conf_dir = "/etc/hadoop/conf"
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
   hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
   hive_user = config['configurations']['hive-env']['hive_user']
-  hive_conf_dir = "/etc/hive/conf"
   webhcat_user = config['configurations']['hive-env']['webhcat_user']
-  webhcat_conf_dir = '/etc/hive-webhcat/conf'
+
+  # default configuration directories
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  webhcat_conf_dir = '/etc/hive-webhcat/conf'
+  hive_etc_dir_prefix = "/etc/hive"
+  hive_conf_dir = "/etc/hive/conf"
+  hive_client_conf_dir = "/etc/hive/conf"
+  hive_server_conf_dir = "/etc/hive/conf.server"
+
+  # HDP 2.2+
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+    webhcat_conf_dir = '/usr/hdp/current/hive-webhcat/conf'
+    hive_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+    hive_client_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+    hive_server_conf_dir = format("/usr/hdp/current/{component_directory}/conf/conf.server")
+
+
+  hive_config_dir = hive_client_conf_dir
+  if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+    hive_config_dir = hive_server_conf_dir
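
Putting the role map and the HDP 2.2+ overrides in this file together, the effective Hive configuration directory now depends on the command's role. An illustrative walk-through on an HDP 2.2+ host:

# role HIVE_SERVER    -> component_directory 'hive-server2'
#                        hive_config_dir '/usr/hdp/current/hive-server2/conf/conf.server'
# role HIVE_METASTORE -> component_directory 'hive-metastore'
#                        hive_config_dir '/usr/hdp/current/hive-metastore/conf/conf.server'
# any other role      -> hive_config_dir == hive_client_conf_dir, e.g.
#                        '/usr/hdp/current/hive-client/conf' for HIVE_CLIENT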

+ 13 - 9
ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py

@@ -17,10 +17,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management.libraries.functions import format
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management.core.logger import Logger
 
 import status_params
 
@@ -35,19 +35,23 @@ host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-    kafka_home = '/usr/hdp/current/kafka-broker/'
-    kafka_bin = kafka_home+'bin/kafka'
-else:
-    kafka_home = '/usr/lib/kafka/'
-    kafka_bin = kafka_home+'/bin/kafka'
+# default kafka parameters
+kafka_home = '/usr/lib/kafka/'
+kafka_bin = kafka_home+'/bin/kafka'
+conf_dir = "/etc/kafka/conf"
+
+# parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  kafka_home = '/usr/hdp/current/kafka-broker/'
+  kafka_bin = kafka_home+'bin/kafka'
+  conf_dir = "/usr/hdp/current/kafka-broker/conf"
 
 
-conf_dir = "/etc/kafka/conf"
 kafka_user = config['configurations']['kafka-env']['kafka_user']
 kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
 kafka_pid_dir = status_params.kafka_pid_dir
 kafka_pid_file = kafka_pid_dir+"/kafka.pid"
+
 # This is hardcoded on the kafka bash process lifecycle on which we have no control over
 kafka_managed_pid_dir = "/var/run/kafka"
 kafka_managed_log_dir = "/var/log/kafka"

+ 2 - 2
ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/status_params.py

@@ -17,8 +17,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
 
 config = Script.get_config()
 

+ 9 - 7
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py

@@ -18,13 +18,15 @@ limitations under the License.
 Ambari Agent
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
 import status_params
-import json
+
 from ambari_commons import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
 
 if OSCheck.is_windows_family():
   from params_windows import *
@@ -130,7 +132,7 @@ security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 if security_enabled:
   knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
   _hostname_lowercase = config['hostname'].lower()
@@ -174,7 +176,7 @@ if has_ranger_admin:
   elif xa_audit_db_flavor.lower() == 'oracle':
     jdbc_jar_name = "ojdbc6.jar"
     jdbc_symlink_name = "oracle-jdbc-driver.jar"
-  elif nxa_audit_db_flavor.lower() == 'postgres':
+  elif xa_audit_db_flavor.lower() == 'postgres':
     jdbc_jar_name = "postgresql.jar"
     jdbc_symlink_name = "postgres-jdbc-driver.jar"
   elif xa_audit_db_flavor.lower() == 'sqlserver':

+ 13 - 10
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py

@@ -16,11 +16,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
-from ambari_commons import OSCheck
+from resource_management.libraries.script.script import Script
 
 # server configurations
 config = Script.get_config()
@@ -31,19 +30,23 @@ knox_cert_store_path = '/var/lib/knox/data/security/keystores/gateway.jks'
 knox_user = default("/configurations/knox-env/knox_user", "knox")
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+
+# default parameters
+knox_bin = '/usr/bin/gateway'
+knox_conf_dir = '/etc/knox/conf'
+ldap_bin = '/usr/lib/knox/bin/ldap.sh'
+knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
+
+# HDP 2.2+ parameters
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   knox_bin = '/usr/hdp/current/knox-server/bin/gateway.sh'
+  knox_conf_dir = '/usr/hdp/current/knox-server/conf'
   ldap_bin = '/usr/hdp/current/knox-server/bin/ldap.sh'
   knox_client_bin = '/usr/hdp/current/knox-server/bin/knoxcli.sh'
-else:
-  knox_bin = '/usr/bin/gateway'
-  ldap_bin = '/usr/lib/knox/bin/ldap.sh'
-  knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
 
 knox_group = default("/configurations/knox-env/knox_group", "knox")
 mode = 0644
 
 # server configurations
-knox_conf_dir = '/etc/knox/conf'
 knox_data_dir = '/var/lib/knox/data'
 knox_logs_dir = '/var/log/knox'

+ 9 - 3
ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py

@@ -17,12 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
 
 config = Script.get_config()
 
 knox_conf_dir = '/etc/knox/conf'
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  knox_conf_dir = '/usr/hdp/current/knox-server/conf'
+
 knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
 knox_pid_file = format("{knox_pid_dir}/gateway.pid")
 ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
@@ -34,7 +39,8 @@ if security_enabled:
 else:
     knox_keytab_path = None
     knox_principal_name = None
+
 hostname = config['hostname'].lower()
 knox_user = default("/configurations/knox-env/knox_user", "knox")
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 temp_dir = Script.get_tmp_dir()

+ 12 - 6
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py

@@ -18,9 +18,12 @@ limitations under the License.
 Ambari Agent
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
 
 # server configurations
 config = Script.get_config()
@@ -36,15 +39,18 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 version = default("/commandParams/version", None)
 
 #mahout params
-mahout_conf_dir = "/etc/mahout/conf"
 mahout_home = "/usr/hdp/current/mahout-client"
+mahout_conf_dir = "/usr/hdp/current/mahout-client/conf"
 mahout_user = config['configurations']['mahout-env']['mahout_user']
 
 #hadoop params
 hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
 hadoop_home = '/usr/hdp/current/hadoop-client'
 
-hadoop_conf_dir = "/etc/hadoop/conf"
+# the configuration directory for HDFS/YARN/MapR is the hadoop config
+# directory, which is symlinked by hadoop-client only
+hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
@@ -53,7 +59,7 @@ smokeuser_principal = config['configurations']['cluster-env']['smokeuser_princip
 user_group = config['configurations']['cluster-env']['user_group']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']

+ 22 - 26
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -17,21 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
 from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management import *
-from resource_management.core import System
-from resource_management.libraries import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions import get_port_from_url
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.resources import HdfsDirectory
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 
 import status_params
-import itertools
 import os
 
 # server configurations
@@ -50,21 +46,13 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 #hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   # start out assuming client libraries
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
   hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"
 
-  # if this is a server action, then use the server binaries; smoke tests
-  # use the client binaries
-  server_role_dir_mapping = { 'OOZIE_SERVER' : 'oozie-server',
-                              'OOZIE_SERVICE_CHECK' : 'oozie-client' }
-
-  command_role = default("/role", "")
-  if command_role not in server_role_dir_mapping:
-    command_role = 'OOZIE_SERVICE_CHECK'
-
-  oozie_root = server_role_dir_mapping[command_role]
+  # oozie-server or oozie-client, depending on role
+  oozie_root = status_params.component_directory
 
   # using the correct oozie root dir, format the correct location
   oozie_lib_dir = format("/usr/hdp/current/{oozie_root}")
@@ -78,6 +66,13 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
   oozie_home = format("/usr/hdp/current/{oozie_root}")
   oozie_bin_dir = format("/usr/hdp/current/{oozie_root}/bin")
   falcon_home = '/usr/hdp/current/falcon-client'
+
+  conf_dir = format("/usr/hdp/current/{oozie_root}/conf")
+  hive_conf_dir = format("{conf_dir}/action-conf/hive")
+
+  # the configuration directory for HDFS/YARN/MapR is the hadoop config
+  # directory, which is symlinked by hadoop-client only
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 else:
   hadoop_bin_dir = "/usr/bin"
   hadoop_lib_home = "/usr/lib/hadoop/lib"
@@ -91,12 +86,12 @@ else:
   oozie_home = "/usr/lib/oozie"
   oozie_bin_dir = "/usr/bin"
   falcon_home = '/usr/lib/falcon'
+  hadoop_conf_dir = "/etc/hadoop/conf"
+  conf_dir = "/etc/oozie/conf"
+  hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
 
 execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-conf_dir = "/etc/oozie/conf"
-hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
@@ -122,7 +117,8 @@ oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.Had
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
 http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
 oozie_site = config['configurations']['oozie-site']
-if security_enabled and hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') < 0:
+
+if security_enabled and Script.is_hdp_stack_less_than("2.2"):
   #older versions of oozie have problems when using _HOST in principal
   oozie_site = dict(config['configurations']['oozie-site'])
   oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
@@ -147,7 +143,7 @@ oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oo
 oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0:
+if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.2"):
   put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
 # for newer
 else:

+ 20 - 3
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py

@@ -18,8 +18,21 @@ limitations under the License.
 
 """
 
-from resource_management import *
-from ambari_commons import OSCheck
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_CLIENT' : 'oozie-client',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
 
 config = Script.get_config()
 
@@ -31,8 +44,12 @@ else:
   pid_file = format("{oozie_pid_dir}/oozie.pid")
 
   security_enabled = config['configurations']['cluster-env']['security_enabled']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
   conf_dir = "/etc/oozie/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+
   tmp_dir = Script.get_tmp_dir()
   oozie_user = config['configurations']['oozie-env']['oozie_user']
   hostname = config["hostname"]
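
Several of the status_params.py changes above introduce a SERVER_ROLE_DIRECTORY_MAP and resolve a component_directory through Script.get_component_from_role, which is then formatted into a /usr/hdp/current/&lt;component&gt;/conf path. The sketch below only illustrates that lookup pattern; the function signature and fallback-to-default behavior are assumptions, not the real Ambari helper.

# Illustrative sketch of the role-to-component lookup; the real helper is
# Script.get_component_from_role, whose exact behavior is not reproduced here.
SERVER_ROLE_DIRECTORY_MAP = {
    'OOZIE_SERVER': 'oozie-server',
    'OOZIE_CLIENT': 'oozie-client',
    'OOZIE_SERVICE_CHECK': 'oozie-client'
}

def get_component_from_role(role_directory_map, default_role, command_role):
    # assumed contract: unknown roles fall back to the supplied default role
    return role_directory_map.get(command_role, role_directory_map[default_role])

component_directory = get_component_from_role(
    SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT", command_role="OOZIE_SERVER")

# on HDP 2.2+ the conf dir is then derived from the resolved component
conf_dir = "/usr/hdp/current/{0}/conf".format(component_directory)
print(conf_dir)  # /usr/hdp/current/oozie-server/conf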

+ 18 - 12
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py

@@ -18,9 +18,12 @@ limitations under the License.
 Ambari Agent
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
 
 # server configurations
 config = Script.get_config()
@@ -34,18 +37,21 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default parameters
+pig_conf_dir = "/etc/pig/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_bin_dir = "/usr/bin"
+hadoop_home = '/usr'
+pig_bin_dir = ""
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  pig_conf_dir = "/usr/hdp/current/pig-client/conf"
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
   hadoop_home = '/usr/hdp/current/hadoop-client'
   pig_bin_dir = '/usr/hdp/current/pig-client/bin'
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  pig_bin_dir = ""
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-pig_conf_dir = "/etc/pig/conf"
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
@@ -54,7 +60,7 @@ smokeuser_principal = config['configurations']['cluster-env']['smokeuser_princip
 user_group = config['configurations']['cluster-env']['user_group']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.

+ 14 - 4
ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py

@@ -17,11 +17,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management.libraries.functions import format
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'RANGER_ADMIN' : 'ranger-admin',
+  'RANGER_USERSYNC' : 'ranger-usersync'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "RANGER_ADMIN")
+
 config  = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
@@ -32,12 +42,12 @@ host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
-stack_is_hdp23_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.3') >= 0
+stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")
+stack_is_hdp23_or_further = Script.is_hdp_stack_greater_or_equal("2.3")
 
 if stack_is_hdp22_or_further:
   ranger_home    = '/usr/hdp/current/ranger-admin'
-  ranger_conf    = '/etc/ranger/admin/conf'
+  ranger_conf    = '/usr/hdp/current/ranger-admin/conf'
   ranger_stop    = '/usr/bin/ranger-admin-stop'
   ranger_start   = '/usr/bin/ranger-admin-start'
   usersync_home  = '/usr/hdp/current/ranger-usersync'

+ 2 - 2
ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py

@@ -18,7 +18,7 @@ limitations under the License.
 
 """
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 
@@ -31,7 +31,7 @@ version = default("/commandParams/version", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-stack_is_hdp23_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.3') >= 0
+stack_is_hdp23_or_further = Script.is_hdp_stack_greater_or_equal("2.3")
 
 if stack_is_hdp23_or_further:
   kms_home = '/usr/hdp/current/ranger-kms'

+ 11 - 9
ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py

@@ -17,11 +17,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
-from ambari_commons import OSCheck
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
 
 if OSCheck.is_windows_family():
   from params_windows import *
@@ -42,17 +43,18 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 #hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+slider_bin_dir = "/usr/lib/slider/bin"
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   slider_bin_dir = '/usr/hdp/current/slider-client/bin'
-else:
-  slider_bin_dir = "/usr/lib/slider/bin"
 
-hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+slider_conf_dir = "/usr/hdp/current/slider-client/conf"
+
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 slider_env_sh_template = config['configurations']['slider-env']['content']
 
 java64_home = config['hostLevelParams']['java_home']

+ 2 - 6
ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py

@@ -16,15 +16,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-from ambari_commons import OSCheck
+from resource_management.libraries.script.script import Script
 
 # server configurations
 config = Script.get_config()
 
-slider_conf_dir = "/etc/slider/conf"
+slider_conf_dir = "/usr/hdp/current/slider-client/conf"
 storm_slider_conf_dir = '/usr/hdp/current/storm-slider-client/conf'
 slider_home_dir = '/usr/hdp/current/slider-client'

+ 28 - 20
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py

@@ -18,12 +18,28 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-from setup_spark import *
+
 import status_params
 
+from setup_spark import *
+
+import resource_management.libraries.functions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
+  'SPARK_CLIENT' : 'spark-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
+
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
@@ -42,29 +58,21 @@ version = default("/commandParams/version", None)
 # Commenting out for time being
 #stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2.1.0') >= 0
 
-stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
+spark_conf = '/etc/spark/conf'
+hadoop_conf_dir = "/etc/hadoop/conf"
 
-if stack_is_hdp22_or_further:
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   hadoop_home = "/usr/hdp/current/hadoop-client"
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  spark_conf = '/etc/spark/conf'
+  spark_conf = format("/usr/hdp/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
   spark_pid_dir = status_params.spark_pid_dir
-  spark_role_root = "spark-client"
+  spark_home = format("/usr/hdp/current/{component_directory}")
 
-  command_role = default("/role", "")
-
-  if command_role == "SPARK_CLIENT":
-    spark_role_root = "spark-client"
-  elif command_role == "SPARK_JOBHISTORYSERVER":
-    spark_role_root = "spark-historyserver"
-
-  spark_home = format("/usr/hdp/current/{spark_role_root}")
-else:
-  pass
 
 java_home = config['hostLevelParams']['java_home']
-hadoop_conf_dir = "/etc/hadoop/conf"
+
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
@@ -119,7 +127,7 @@ if spark_javaopts_properties.find('-Dhdp.version') == -1:
   spark_javaopts_properties = spark_javaopts_properties+ ' -Dhdp.version=' + str(hdp_full_version)
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 spark_kerberos_keytab =  config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
 spark_kerberos_principal =  config['configurations']['spark-defaults']['spark.history.kerberos.principal']
 

+ 2 - 1
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/status_params.py

@@ -18,7 +18,8 @@ limitations under the License.
 
 """
 
-from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
 
 config = Script.get_config()
 

+ 0 - 1
ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py

@@ -16,7 +16,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
 from ambari_commons import OSCheck
 from resource_management.libraries.functions.default import default
 

+ 22 - 12
ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py

@@ -17,11 +17,19 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_kinit_path import get_kinit_path
 from resource_management.libraries.script import Script
 
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SQOOP' : 'sqoop-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")
+
 config = Script.get_config()
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
@@ -33,23 +41,25 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  sqoop_conf_dir = '/etc/sqoop/conf'
+# default hadoop params
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+sqoop_lib = "/usr/lib/sqoop/lib"
+hadoop_home = '/usr/lib/hadoop'
+hbase_home = "/usr/lib/hbase"
+hive_home = "/usr/lib/hive"
+sqoop_bin_dir = "/usr/bin"
+zoo_conf_dir = "/etc/zookeeper"
+
+# HDP 2.2+ params
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  sqoop_conf_dir = '/usr/hdp/current/sqoop-client/conf'
   sqoop_lib = '/usr/hdp/current/sqoop-client/lib'
   hadoop_home = '/usr/hdp/current/hbase-client'
   hbase_home = '/usr/hdp/current/hbase-client'
   hive_home = '/usr/hdp/current/hive-client'
   sqoop_bin_dir = '/usr/hdp/current/sqoop-client/bin/'
-else:
-  sqoop_conf_dir = "/usr/lib/sqoop/conf"
-  sqoop_lib = "/usr/lib/sqoop/lib"
-  hadoop_home = '/usr/lib/hadoop'
-  hbase_home = "/usr/lib/hbase"
-  hive_home = "/usr/lib/hive"
-  sqoop_bin_dir = "/usr/bin"
+  zoo_conf_dir = "/usr/hdp/current/zookeeper-client/conf"
 
-zoo_conf_dir = "/etc/zookeeper"
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']

+ 20 - 16
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py

@@ -17,14 +17,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import re
+import json
+
+import status_params
+
 from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default, format
-import status_params
-import re
-import json
 
 def get_bare_principal(normalized_principal_name):
   """
@@ -51,27 +53,29 @@ tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
-
 version = default("/commandParams/version", None)
 
+conf_dir = status_params.conf_dir
+
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
+stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")
+
+# default hadoop params
+rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
+storm_bin_dir = "/usr/bin"
+storm_lib_dir = "/usr/lib/storm/lib/"
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop parameters for 2.2+
+if stack_is_hdp22_or_further:
   rest_lib_dir = '/usr/hdp/current/storm-client/contrib/storm-rest'
   storm_bin_dir = "/usr/hdp/current/storm-client/bin"
   storm_lib_dir = "/usr/hdp/current/storm-client/lib"
-else:
-  rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
-  storm_bin_dir = "/usr/bin"
-  storm_lib_dir = "/usr/lib/storm/lib/"
+
 
 storm_user = config['configurations']['storm-env']['storm_user']
 log_dir = config['configurations']['storm-env']['storm_log_dir']
 pid_dir = status_params.pid_dir
-conf_dir = "/etc/storm/conf"
 local_dir = config['configurations']['storm-site']['storm.local.dir']
 user_group = config['configurations']['cluster-env']['user_group']
 java64_home = config['hostLevelParams']['java_home']
@@ -102,7 +106,7 @@ if security_enabled:
   storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
   storm_keytab_path = config['configurations']['storm-env']['storm_keytab']
 
-  if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  if stack_is_hdp22_or_further:
     storm_ui_keytab_path = config['configurations']['storm-env']['storm_ui_keytab']
     _storm_ui_jaas_principal_name = config['configurations']['storm-env']['storm_ui_principal_name']
     storm_ui_jaas_principal = _storm_ui_jaas_principal_name.replace('_HOST',_hostname_lowercase)
@@ -167,7 +171,7 @@ if has_ranger_admin:
   elif xa_audit_db_flavor.lower() == 'oracle':
     jdbc_jar_name = "ojdbc6.jar"
     jdbc_symlink_name = "oracle-jdbc-driver.jar"
-  elif nxa_audit_db_flavor.lower() == 'postgres':
+  elif xa_audit_db_flavor.lower() == 'postgres':
     jdbc_jar_name = "postgresql.jar"
     jdbc_symlink_name = "postgres-jdbc-driver.jar"
   elif xa_audit_db_flavor.lower() == 'sqlserver':

+ 25 - 6
ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py

@@ -22,6 +22,18 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import default, format
 from ambari_commons import OSCheck
 
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'STORM_UI_SERVER' : 'storm-client',
+  'DRPC_SERVER' : 'storm-client',
+  'STORM_SERVICE_CHECK' : 'storm-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "STORM_SERVICE_CHECK")
+
 config = Script.get_config()
 
 if OSCheck.is_windows_family():
@@ -36,19 +48,26 @@ else:
   pid_ui = format("{pid_dir}/ui.pid")
   pid_logviewer = format("{pid_dir}/logviewer.pid")
   pid_rest_api = format("{pid_dir}/restapi.pid")
-  pid_files = {"logviewer":pid_logviewer,
-               "ui": pid_ui,
-               "nimbus": pid_nimbus,
-               "supervisor": pid_supervisor,
-               "drpc": pid_drpc,
-               "rest_api": pid_rest_api}
+
+  pid_files = {
+    "logviewer":pid_logviewer,
+    "ui": pid_ui,
+    "nimbus": pid_nimbus,
+    "supervisor": pid_supervisor,
+    "drpc": pid_drpc,
+    "rest_api": pid_rest_api
+  }
 
   # Security related/required params
   hostname = config['hostname']
   security_enabled = config['configurations']['cluster-env']['security_enabled']
   kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
+
   conf_dir = "/etc/storm/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+
   storm_user = config['configurations']['storm-env']['storm_user']
   storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
   storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)

+ 25 - 12
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py

@@ -17,9 +17,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
 
 # server configurations
 config = Script.get_config()
@@ -34,15 +39,27 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# default hadoop parameters
+hadoop_home = '/usr'
+hadoop_bin_dir = "/usr/bin"
+hadoop_conf_dir = "/etc/hadoop/conf"
+tez_etc_dir = "/etc/tez"
+config_dir = "/etc/tez/conf"
+path_to_tez_examples_jar = "/usr/lib/tez/tez-mapreduce-examples*.jar"
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
   path_to_tez_examples_jar = "/usr/hdp/{hdp_version}/tez/tez-examples*.jar"
-else:
-  hadoop_bin_dir = "/usr/bin"
-  path_to_tez_examples_jar = "/usr/lib/tez/tez-mapreduce-examples*.jar"
-hadoop_conf_dir = "/etc/hadoop/conf"
 
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+# tez only started linking /usr/hdp/x.x.x.x/tez-client/conf in HDP 2.3+
+if Script.is_hdp_stack_greater_or_equal("2.3"):
+  # !!! use realpath for now since the symlink exists but is broken and a
+  # broken symlink messes with the DirectoryProvider class
+  config_dir = os.path.realpath("/usr/hdp/current/tez-client/conf")
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
@@ -51,10 +68,6 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 
-config_dir_prefix = "/etc/tez"
-config_dir = format("{config_dir_prefix}/conf")
-
-hadoop_home = '/usr'
 java64_home = config['hostLevelParams']['java_home']
 
 tez_user = config['configurations']['tez-env']['tez_user']
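
The Tez change above resolves the client conf path with os.path.realpath because, on HDP 2.3, the /usr/hdp/current/tez-client/conf symlink can exist while its target does not yet. The snippet below only demonstrates that distinction using temporary placeholder paths.

# Demonstration only: a dangling symlink is still a link, but its target does
# not exist; resolving it up front yields a concrete path to work with.
import os
import tempfile

tmp = tempfile.mkdtemp()
target = os.path.join(tmp, "2.3.0.0-0000", "tez-client", "conf")  # placeholder
link = os.path.join(tmp, "current-tez-conf")
os.symlink(target, link)

print(os.path.islink(link))    # True  - the link itself is present
print(os.path.exists(link))    # False - the target is missing (broken link)
print(os.path.realpath(link))  # the resolved target path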

+ 5 - 9
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez.py

@@ -27,14 +27,12 @@ from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 def tez():
   import params
 
-  Directory(params.config_dir_prefix,
-            mode=0755
-  )
+  Directory(params.tez_etc_dir, mode=0755)
+
   Directory(params.config_dir,
             owner = params.tez_user,
             group = params.user_group,
-            recursive = True
-  )
+            recursive = True)
 
   XmlConfig( "tez-site.xml",
              conf_dir = params.config_dir,
@@ -42,13 +40,11 @@ def tez():
              configuration_attributes=params.config['configuration_attributes']['tez-site'],
              owner = params.tez_user,
              group = params.user_group,
-             mode = 0664
-  )
+             mode = 0664)
 
   File(format("{config_dir}/tez-env.sh"),
        owner=params.tez_user,
-       content=InlineTemplate(params.tez_env_sh_template)
-  )
+       content=InlineTemplate(params.tez_env_sh_template))
 
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)

+ 57 - 37
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py

@@ -19,11 +19,30 @@ Ambari Agent
 
 """
 import os
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_directory import HdfsDirectory
+
 import status_params
 
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+MAPR_SERVER_ROLE_DIRECTORY_MAP = {
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
+}
+
+YARN_SERVER_ROLE_DIRECTORY_MAP = {
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'YARN_CLIENT' : 'hadoop-yarn-client'
+}
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -39,46 +58,47 @@ version = default("/commandParams/version", None)
 
 hostname = config['hostname']
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  yarn_role_root = "hadoop-yarn-client"
-  mapred_role_root = "hadoop-mapreduce-client"
+# hadoop default parameters
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+hadoop_bin = "/usr/lib/hadoop/sbin"
+hadoop_bin_dir = "/usr/bin"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
 
+  # MapR directory root
+  mapred_role_root = "hadoop-mapreduce-client"
   command_role = default("/role", "")
-  if command_role == "APP_TIMELINE_SERVER":
-    yarn_role_root = "hadoop-yarn-timelineserver"
-  elif command_role == "HISTORYSERVER":
-    mapred_role_root = "hadoop-mapreduce-historyserver"
-  elif command_role == "MAPREDUCE2_CLIENT":
-    mapred_role_root = "hadoop-mapreduce-client"
-  elif command_role == "NODEMANAGER":
-    yarn_role_root = "hadoop-yarn-nodemanager"
-  elif command_role == "RESOURCEMANAGER":
-    yarn_role_root = "hadoop-yarn-resourcemanager"
-  elif command_role == "YARN_CLIENT":
-    yarn_role_root = "hadoop-yarn-client"
-
-  hadoop_libexec_dir          = "/usr/hdp/current/hadoop-client/libexec"
-  hadoop_bin                  = "/usr/hdp/current/hadoop-client/sbin"
-  hadoop_bin_dir              = "/usr/hdp/current/hadoop-client/bin"
+  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
+    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  # YARN directory root
+  yarn_role_root = "hadoop-yarn-client"
+  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
+    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
+  hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
 
   hadoop_mapred2_jar_location = format("/usr/hdp/current/{mapred_role_root}")
-  mapred_bin                  = format("/usr/hdp/current/{mapred_role_root}/sbin")
+  mapred_bin = format("/usr/hdp/current/{mapred_role_root}/sbin")
+
+  hadoop_yarn_home = format("/usr/hdp/current/{yarn_role_root}")
+  yarn_bin = format("/usr/hdp/current/{yarn_role_root}/sbin")
+  yarn_container_bin = format("/usr/hdp/current/{yarn_role_root}/bin")
+
+  # the configuration directory for HDFS/YARN/MapR is the hadoop config
+  # directory, which is symlinked by hadoop-client only
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 
-  hadoop_yarn_home            = format("/usr/hdp/current/{yarn_role_root}")
-  yarn_bin                    = format("/usr/hdp/current/{yarn_role_root}/sbin")
-  yarn_container_bin          = format("/usr/hdp/current/{yarn_role_root}/bin")
-else:
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-  hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-  mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-  yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-  yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 
-hadoop_conf_dir = "/etc/hadoop/conf"
 limits_conf_dir = "/etc/security/limits.d"
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
 
@@ -93,7 +113,7 @@ smokeuser_principal = config['configurations']['cluster-env']['smokeuser_princip
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 rm_hosts = config['clusterHostInfo']['rm_host']
 rm_host = rm_hosts[0]
 rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]

+ 6 - 0
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/status_params.py

@@ -52,6 +52,12 @@ else:
 
   # Security related/required params
   hadoop_conf_dir = "/etc/hadoop/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    # the configuration directory for HDFS/YARN/MapR is the hadoop config
+    # directory, which is symlinked by hadoop-client only
+    hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
+
   hostname = config['hostname']
   kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   security_enabled = config['configurations']['cluster-env']['security_enabled']

+ 19 - 20
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py

@@ -18,11 +18,13 @@ limitations under the License.
 Ambari Agent
 
 """
+import status_params
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
-from resource_management import *
-import status_params
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
 
 # server configurations
 config = Script.get_config()
@@ -33,29 +35,26 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 stack_name = default("/hostLevelParams/stack_name", None)
 current_version = default("/hostLevelParams/current_version", None)
+component_directory = status_params.component_directory
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  role_root = "zookeeper-client"
-  command_role = default("/role", "")
-
-  if command_role == "ZOOKEEPER_SERVER":
-    role_root = "zookeeper-server"
+# default parameters
+zk_home = "/usr"
+zk_bin = "/usr/lib/zookeeper/bin"
+zk_cli_shell = "/usr/lib/zookeeper/bin/zkCli.sh"
+config_dir = "/etc/zookeeper/conf"
 
-  zk_home = format("/usr/hdp/current/{role_root}")
-  zk_bin = format("/usr/hdp/current/{role_root}/bin")
-  zk_cli_shell = format("/usr/hdp/current/{role_root}/bin/zkCli.sh")
-else:
-  zk_home = "/usr"
-  zk_bin = "/usr/lib/zookeeper/bin"
-  zk_cli_shell = "/usr/lib/zookeeper/bin/zkCli.sh"
+# hadoop parameters for 2.2+
+if Script.is_hdp_stack_greater_or_equal("2.2"):
+  zk_home = format("/usr/hdp/current/{component_directory}")
+  zk_bin = format("/usr/hdp/current/{component_directory}/bin")
+  zk_cli_shell = format("/usr/hdp/current/{component_directory}/bin/zkCli.sh")
+  config_dir = status_params.config_dir
 
 
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['zookeeper-env']['zk_user']
+zk_user = config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
 user_group = config['configurations']['cluster-env']['user_group']
 zk_env_sh_template = config['configurations']['zookeeper-env']['content']
@@ -91,7 +90,7 @@ security_enabled = config['configurations']['cluster-env']['security_enabled']
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
 #log4j.properties
 if ('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j']):

+ 18 - 4
ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py

@@ -17,9 +17,20 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-from resource_management import *
 from ambari_commons import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/hdp/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ZOOKEEPER_SERVER' : 'zookeeper-server',
+  'ZOOKEEPER_CLIENT' : 'zookeeper-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ZOOKEEPER_CLIENT")
 
 config = Script.get_config()
 
@@ -32,7 +43,10 @@ else:
   # Security related/required params
   hostname = config['hostname']
   security_enabled = config['configurations']['cluster-env']['security_enabled']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
-  config_dir = "/etc/zookeeper/conf"
   zk_user =  config['configurations']['zookeeper-env']['zk_user']
+
+  config_dir = "/etc/zookeeper/conf"
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    config_dir = format("/usr/hdp/current/{component_directory}/conf")

+ 16 - 8
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -29,27 +29,35 @@ sudo = AMBARI_SUDO_BINARY
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# default hadoop params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# HDP 2.2+ params
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
   hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
+  # not supported in HDP 2.2+
+  hadoop_conf_empty_dir = None
 
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 versioned_hdp_root = '/usr/hdp/current'
+
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
+
 #java params
 java_home = config['hostLevelParams']['java_home']
+
 #hadoop params
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
 
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0  and not OSCheck.is_suse_family():
+if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1") and not OSCheck.is_suse_family():
   # deprecated rhel jsvc_path
   jsvc_path = "/usr/libexec/bigtop-utils"
 else:

+ 3 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -34,10 +34,11 @@ def setup_config():
   import params
   stackversion = params.stack_version_unformatted
   if params.has_namenode or stackversion.find('Gluster') >= 0:
+    # create core-site only if the hadoop config directory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
               configurations=params.config['configurations']['core-site'],
               configuration_attributes=params.config['configuration_attributes']['core-site'],
               owner=params.hdfs_user,
-              group=params.user_group
-    )
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
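
The only_if guard added above relies on the format() helper interpolating {hadoop_conf_dir} from the caller's variables. The snippet below is a simplified stand-in for that helper built on frame inspection; the real implementation in resource_management.libraries.functions.format differs.

# Simplified stand-in for resource_management's format(): placeholders are
# filled from the caller's local and global variables. Illustrative only.
import inspect

def simple_format(template):
    caller = inspect.currentframe().f_back
    namespace = dict(caller.f_globals)
    namespace.update(caller.f_locals)
    return template.format(**namespace)

hadoop_conf_dir = "/etc/hadoop/conf"
print(simple_format("ls {hadoop_conf_dir}"))  # ls /etc/hadoop/conf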

+ 17 - 12
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -69,11 +69,26 @@ def is_secure_port(port):
   else:
     return False
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+hadoop_home = "/usr/lib/hadoop"
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_hdp_root = '/usr/hdp/current'
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# HDP 2.2+ params
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
   hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
   hadoop_home = "/usr/hdp/current/hadoop-client"
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+
+  # not supported in HDP 2.2+
+  hadoop_conf_empty_dir = None
+
   if not security_enabled:
     hadoop_secure_dn_user = '""'
   else:
@@ -91,16 +106,6 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
       hadoop_secure_dn_user = hdfs_user
     else:
       hadoop_secure_dn_user = '""'
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_home = "/usr/lib/hadoop"
-  hadoop_secure_dn_user = hdfs_user
-
-hadoop_dir = "/etc/hadoop"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-versioned_hdp_root = '/usr/hdp/current'
 
 #hadoop params
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']

+ 16 - 17
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py

@@ -170,20 +170,19 @@ def setup_hadoop_env():
     else:
       tc_owner = params.hdfs_user
 
-    Directory(params.hadoop_dir,
-              mode=0755
-    )
-    Directory(params.hadoop_conf_empty_dir,
-              recursive=True,
-              owner="root",
-              group=params.user_group
-    )
-    Link(params.hadoop_conf_dir,
-         to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}")
-    )
-    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-         owner=tc_owner,
-         group=params.user_group,
-         content=InlineTemplate(params.hadoop_env_sh_template)
-    )
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_hdp_stack_less_than("2.2"):
+      Directory(params.hadoop_conf_empty_dir, recursive=True, owner="root",
+        group=params.user_group )
+
+      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}"))
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
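
The hook change above now writes hadoop-env.sh only when the configuration directory actually exists, since on HDP 2.2+ /etc/hadoop/conf is managed by the packages rather than created by the hook. A minimal sketch of that guard, with placeholder path and contents:

# Minimal sketch of the existence guard; the path and template are placeholders.
import os

hadoop_conf_dir = "/tmp/example-hadoop-conf"                         # placeholder
hadoop_env_sh_template = "export HADOOP_LOG_DIR=/var/log/hadoop\n"   # placeholder

if os.path.exists(hadoop_conf_dir):
    with open(os.path.join(hadoop_conf_dir, "hadoop-env.sh"), "w") as env_file:
        env_file.write(hadoop_env_sh_template)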

+ 16 - 11
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -28,24 +28,29 @@ config = Script.get_config()
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_bin = "/usr/lib/hadoop/sbin"
+hadoop_home = '/usr'
+create_lib_snappy_symlinks = True
+hadoop_conf_dir = "/etc/hadoop/conf"
+default_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+
+# HDP 2.2+ params
+if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
   hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
   hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"
   hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"
   hadoop_home = '/usr/hdp/current/hadoop-client'
   create_lib_snappy_symlinks = False
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_lib_home = "/usr/lib/hadoop/lib"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_home = '/usr'
-  create_lib_snappy_symlinks = True
+  hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+  default_topology_script_file_path = "/usr/hdp/current/hadoop-client/conf/topology_script.py"
 
 current_service = config['serviceName']
-hadoop_conf_dir = "/etc/hadoop/conf"
+
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
@@ -183,7 +188,7 @@ all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 
 #topology files
-net_topology_script_file_path = default("/configurations/core-site/net.topology.script.file.name","/etc/hadoop/conf/topology_script.py")
+net_topology_script_file_path = default("/configurations/core-site/net.topology.script.file.name",default_topology_script_file_path)
 net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
 net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
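
A condensed sketch of the defaults-then-override pattern this hunk adopts; the tuple comparison stands in for Script.is_hdp_stack_greater_or_equal("2.2") and the helper name is illustrative:

def hadoop_params(hdp_stack_version):
    # defaults cover stacks that predate the versioned /usr/hdp layout
    conf_dir = "/etc/hadoop/conf"
    topology_script = "/etc/hadoop/conf/topology_script.py"

    # HDP 2.2+ resolves everything through the /usr/hdp/current symlinks
    if hdp_stack_version >= (2, 2):
        conf_dir = "/usr/hdp/current/hadoop-client/conf"
        topology_script = conf_dir + "/topology_script.py"

    return conf_dir, topology_script

# e.g. hadoop_params((2, 3)) -> ("/usr/hdp/current/hadoop-client/conf", ...)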

+ 13 - 13
ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py

@@ -102,7 +102,7 @@ class TestFlumeHandler(RMFTestCase):
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch("sys.exit")
   def test_status_default(self, sys_exit_mock, structured_out_mock):
-    
+
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
@@ -113,7 +113,7 @@ class TestFlumeHandler(RMFTestCase):
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
-    
+
     # test that the method was called with empty processes
     self.assertTrue(structured_out_mock.called)
     structured_out_mock.assert_called_with({'processes': []})
@@ -130,7 +130,7 @@ class TestFlumeHandler(RMFTestCase):
    script.load_structured_out()
 
    self.assertFalse("version" in script.structuredOut)
-    
+
 
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch("glob.glob")
@@ -148,7 +148,7 @@ class TestFlumeHandler(RMFTestCase):
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
-    
+
     self.assertTrue(structured_out_mock.called)
 
     # call_args[0] is a tuple, whose first element is the actual call argument
@@ -156,7 +156,7 @@ class TestFlumeHandler(RMFTestCase):
     self.assertTrue(struct_out.has_key('processes'))
 
     self.assertNoMoreResources()
-    
+
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch("glob.glob")
   @patch("sys.exit")
@@ -173,13 +173,13 @@ class TestFlumeHandler(RMFTestCase):
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
-      
+
     self.assertTrue(structured_out_mock.called)
 
     # call_args[0] is a tuple, whose first element is the actual call argument
     struct_out = structured_out_mock.call_args[0][0]
     self.assertTrue(struct_out.has_key('processes'))
-    self.assertNoMoreResources()    
+    self.assertNoMoreResources()
 
   def assert_configure_default(self):
 
@@ -400,24 +400,24 @@ class TestFlumeHandler(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    self.assertResourceCalled('Directory', '/etc/flume/conf', recursive=True)
+    self.assertResourceCalled('Directory', '/usr/hdp/current/flume-server/conf', recursive=True)
 
     self.assertResourceCalled('Directory', '/var/log/flume', owner = 'flume')
 
-    self.assertResourceCalled('Directory', '/etc/flume/conf/a1')
+    self.assertResourceCalled('Directory', '/usr/hdp/current/flume-server/conf/a1')
 
-    self.assertResourceCalled('PropertiesFile', '/etc/flume/conf/a1/flume.conf',
+    self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/flume-server/conf/a1/flume.conf',
       mode = 0644,
       properties = build_flume(
         self.getConfig()['configurations']['flume-conf']['content'])['a1'])
 
     self.assertResourceCalled('File',
-      '/etc/flume/conf/a1/log4j.properties',
+      '/usr/hdp/current/flume-server/conf/a1/log4j.properties',
       content = Template('log4j.properties.j2', agent_name = 'a1'),
       mode = 0644)
 
     self.assertResourceCalled('File',
-      '/etc/flume/conf/a1/ambari-meta.json',
+      '/usr/hdp/current/flume-server/conf/a1/ambari-meta.json',
       content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
       mode = 0644)
 
@@ -425,7 +425,7 @@ class TestFlumeHandler(RMFTestCase):
 
     self.assertTrue(content.get_content().find('/usr/hdp/current/hive-metastore') > -1)
 
-    self.assertResourceCalled('File', "/etc/flume/conf/a1/flume-env.sh",
+    self.assertResourceCalled('File', "/usr/hdp/current/flume-server/conf/a1/flume-env.sh",
                               owner="flume",
                               content=content)
 

+ 16 - 14
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -443,7 +443,7 @@ class TestHBaseMaster(RMFTestCase):
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755)
 
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-master/conf',
       owner = 'hbase',
       group = 'hadoop',
       recursive = True)
@@ -469,45 +469,47 @@ class TestHBaseMaster(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-master/conf',
       configurations = self.getConfig()['configurations']['hbase-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-master/conf',
       configurations = self.getConfig()['configurations']['core-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'])
+
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-master/conf',
       configurations = self.getConfig()['configurations']['hdfs-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
 
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
 
     self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-master/conf',
       configurations = self.getConfig()['configurations']['hbase-policy'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy'])
 
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-master/conf/hbase-env.sh',
       owner = 'hbase',
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
 
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-MASTER')
 
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/regionservers',
       owner = 'hbase',
       template_tag = None)
 
@@ -520,7 +522,7 @@ class TestHBaseMaster(RMFTestCase):
       recursive = True)
 
     self.assertResourceCalled('File',
-                              '/etc/hbase/conf/log4j.properties',
+                              '/usr/hdp/current/hbase-master/conf/log4j.properties',
                               mode=0644,
                               group='hadoop',
                               owner='hbase',
@@ -529,7 +531,7 @@ class TestHBaseMaster(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', 'hdfs://nn1/apps/hbase/data',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               owner = 'hbase',
@@ -539,7 +541,7 @@ class TestHBaseMaster(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               mode = 0711,
@@ -550,13 +552,13 @@ class TestHBaseMaster(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', None,
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               bin_dir = '/usr/hdp/current/hadoop-client/bin',
                               action = ['create'])
 
-    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-master/conf start master',
       not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
       user = 'hbase')
 

+ 14 - 14
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py

@@ -370,7 +370,7 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755)
 
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
       owner = 'hbase',
       group = 'hadoop',
       recursive = True)
@@ -396,46 +396,46 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
       configurations = self.getConfig()['configurations']['hbase-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
     )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
       configurations = self.getConfig()['configurations']['hdfs-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
 
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
 
     self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
       configurations = self.getConfig()['configurations']['hbase-policy'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy'])
 
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
       owner = 'hbase',
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
 
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
       owner = 'hbase',
       template_tag = 'GANGLIA-RS')
 
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/regionservers',
       owner = 'hbase',
       template_tag = None)
 
@@ -448,7 +448,7 @@ class TestHbaseRegionServer(RMFTestCase):
       recursive = True)
 
     self.assertResourceCalled('File',
-                              '/etc/hbase/conf/log4j.properties',
+                              '/usr/hdp/current/hbase-regionserver/conf/log4j.properties',
                               mode=0644,
                               group='hadoop',
                               owner='hbase',
@@ -457,7 +457,7 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', 'hdfs://nn1/apps/hbase/data',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               owner = 'hbase',
@@ -467,7 +467,7 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               mode = 0711,
@@ -478,13 +478,13 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', None,
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               hdfs_user = 'hdfs',
                               kinit_path_local = "/usr/bin/kinit",
                               bin_dir = '/usr/hdp/current/hadoop-client/bin',
                               action = ['create'])
 
-    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-regionserver/conf start regionserver',
       not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
       user = 'hbase')
 

+ 2 - 2
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py

@@ -114,13 +114,13 @@ class TestServiceCheck(RMFTestCase):
       content = Template('hbase-smoke.sh.j2'),
       mode = 0755,
     )
-    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-client/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-client/bin/hbase --config /usr/hdp/current/hbase-client/conf shell /tmp/hbase-smoke.sh',
       logoutput = True,
       tries = 3,
       user = 'ambari-qa',
       try_sleep = 5,
     )
-    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/hdp/current/hbase-client/bin/hbase',
+    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /usr/hdp/current/hbase-client/conf  /usr/hdp/current/hbase-client/bin/hbase',
       logoutput = True,
       tries = 3,
       user = 'ambari-qa',

+ 29 - 29
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py

@@ -49,7 +49,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
     self.assert_configure_default()
     self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
-                            environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/etc/hbase/conf'},
+                            environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
                             user = 'hbase'
     )
     self.assertNoMoreResources()
@@ -66,7 +66,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
         on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
         timeout = 30,
-        environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/etc/hbase/conf'},
+        environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
         user = 'hbase'
     )
     
@@ -96,7 +96,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
-                          environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/etc/hbase/conf'},
+                          environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
                           user = 'hbase'
     )
     self.assertNoMoreResources()
@@ -113,7 +113,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
         on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
         timeout = 30,
-        environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/etc/hbase/conf'},
+        environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
         user = 'hbase'
     )
     
@@ -133,7 +133,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755)
 
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
       owner = 'hbase',
       group = 'hadoop',
       recursive = True)
@@ -141,17 +141,17 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
       owner = 'hbase',
       group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
+      conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
       configurations = self.getConfig()['configurations']['hbase-site'],
       configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
     )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
       owner = 'hbase',
       content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
 
@@ -181,7 +181,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('Directory', '/etc/hbase',
                               mode = 0755
     )
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
                               owner = 'hbase',
                               group = 'hadoop',
                               recursive = True,
@@ -207,47 +207,47 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hbase-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
     )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
     )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
     )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
     )
     self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hbase-policy'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
                               )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
                               owner = 'hbase',
                               content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
                               owner = 'hbase',
                               template_tag = 'GANGLIA-RS',
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/regionservers',
                               owner = 'hbase',
                               template_tag = None,
                               )
@@ -260,7 +260,7 @@ class TestPhoenixQueryServer(RMFTestCase):
                               recursive = True,
                               )
     self.assertResourceCalled('File',
-                              '/etc/hbase/conf/log4j.properties',
+                              '/usr/hdp/current/hbase-regionserver/conf/log4j.properties',
                               mode=0644,
                               group='hadoop',
                               owner='hbase',
@@ -271,7 +271,7 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('Directory', '/etc/hbase',
                               mode = 0755
     )
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
                               owner = 'hbase',
                               group = 'hadoop',
                               recursive = True,
@@ -297,51 +297,51 @@ class TestPhoenixQueryServer(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hbase-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
     )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
     )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
     )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
     )
     self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
                               owner = 'hbase',
                               group = 'hadoop',
-                              conf_dir = '/etc/hbase/conf',
+                              conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                               configurations = self.getConfig()['configurations']['hbase-policy'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
     )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
                               owner = 'hbase',
                               content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
                               owner = 'hbase',
                               template_tag = 'GANGLIA-RS',
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/regionservers',
                               owner = 'hbase',
                               template_tag = None,
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_queryserver_jaas.conf',
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hbase_queryserver_jaas.conf',
                               owner = 'hbase',
                               template_tag = None,
                               )
@@ -354,7 +354,7 @@ class TestPhoenixQueryServer(RMFTestCase):
                               recursive = True,
                               )
     self.assertResourceCalled('File',
-                              '/etc/hbase/conf/log4j.properties',
+                              '/usr/hdp/current/hbase-regionserver/conf/log4j.properties',
                               mode=0644,
                               group='hadoop',
                               owner='hbase',

+ 16 - 10
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py

@@ -20,6 +20,7 @@ limitations under the License.
 from stacks.utils.RMFTestCase import *
 import json
 from mock.mock import MagicMock, patch
+from resource_management.libraries.script import Script
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 
@@ -162,7 +163,7 @@ class TestDatanode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assert_configure_secured()
+    self.assert_configure_secured("2.2")
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',
@@ -180,7 +181,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -203,7 +204,7 @@ class TestDatanode(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assert_configure_secured()
+    self.assert_configure_secured("2.2")
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
                               group = 'hadoop',
@@ -221,7 +222,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
     )
@@ -295,7 +296,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = None,
     )
@@ -339,7 +340,7 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
+    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
         not_if = None,
     )
@@ -394,7 +395,11 @@ class TestDatanode(RMFTestCase):
                               cd_access='a'
                               )
 
-  def assert_configure_secured(self):
+  def assert_configure_secured(self, stackVersion=STACK_VERSION):
+    conf_dir = '/etc/hadoop/conf'
+    if stackVersion != self.STACK_VERSION:
+      conf_dir = '/usr/hdp/current/hadoop-client/conf'
+
     self.assertResourceCalled('Directory', '/etc/security/limits.d',
                               owner = 'root',
                               group = 'root',
@@ -409,19 +414,20 @@ class TestDatanode(RMFTestCase):
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = conf_dir,
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
                               )
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = conf_dir,
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                               mode = 0644
     )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
+    self.assertResourceCalled('File', conf_dir + '/slaves',
                               content = Template('slaves.j2'),
                               owner = 'root',
                               )
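
The helper above now derives the expected conf dir from the stack version the test drives; a standalone sketch of that selection logic (class and constant names are hypothetical, not from the commit):

class ConfDirSelection(object):
    STACK_VERSION = "2.0.6"  # default stack exercised by most of these tests

    def expected_hadoop_conf_dir(self, stack_version=None):
        stack_version = stack_version or self.STACK_VERSION
        # the default stack still reads /etc/hadoop/conf; any newer stack
        # (e.g. "2.2") resolves through the /usr/hdp/current client symlink
        if stack_version == self.STACK_VERSION:
            return "/etc/hadoop/conf"
        return "/usr/hdp/current/hadoop-client/conf"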

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -634,7 +634,7 @@ class TestHiveServer(RMFTestCase):
      call_mocks = [(0,"hive-server2 - 2.2.0.0-2041"), (0,"hive-server2 - 2.2.0.0-2041")]
     )
 
-    self.assertResourceCalled('Execute', 'hive --config /etc/hive/conf.server --service hiveserver2 --deregister 2.2.0.0-2041',
+    self.assertResourceCalled('Execute', 'hive --config /usr/hdp/current/hive-server2/conf/conf.server --service hiveserver2 --deregister 2.2.0.0-2041',
       path=['/bin:/usr/hdp/current/hive-server2/bin:/usr/hdp/current/hadoop-client/bin'],
       tries=1, user='hive')
 

+ 9 - 9
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py

@@ -148,7 +148,7 @@ class TestOozieClient(RMFTestCase):
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assertResourceCalled('Directory', '/etc/oozie/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/oozie-client/conf',
                               owner = 'oozie',
                               group = 'hadoop',
                               recursive = True
@@ -157,39 +157,39 @@ class TestOozieClient(RMFTestCase):
                               owner = 'oozie',
                               group = 'hadoop',
                               mode = 0664,
-                              conf_dir = '/etc/oozie/conf',
+                              conf_dir = '/usr/hdp/current/oozie-client/conf',
                               configurations = self.getConfig()['configurations']['oozie-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['oozie-site']
     )
-    self.assertResourceCalled('File', '/etc/oozie/conf/oozie-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/oozie-env.sh',
                               owner = 'oozie',
                               content = InlineTemplate(self.getConfig()['configurations']['oozie-env']['content'])
     )
-    self.assertResourceCalled('File', '/etc/oozie/conf/oozie-log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/oozie-log4j.properties',
                               owner = 'oozie',
                               group = 'hadoop',
                               mode = 0644,
                               content = 'log4jproperties\nline2'
     )
-    self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/adminusers.txt',
                               content = Template('adminusers.txt.j2'),
                               owner = 'oozie',
                               group = 'hadoop',
                               mode=0644,
                               )
-    self.assertResourceCalled('File', '/etc/oozie/conf/hadoop-config.xml',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/hadoop-config.xml',
                               owner = 'oozie',
                               group = 'hadoop',
                               )
-    self.assertResourceCalled('File', '/etc/oozie/conf/oozie-default.xml',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/oozie-default.xml',
                               owner = 'oozie',
                               group = 'hadoop',
                               )
-    self.assertResourceCalled('Directory', '/etc/oozie/conf/action-conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/oozie-client/conf/action-conf',
                               owner = 'oozie',
                               group = 'hadoop',
                               )
-    self.assertResourceCalled('File', '/etc/oozie/conf/action-conf/hive.xml',
+    self.assertResourceCalled('File', '/usr/hdp/current/oozie-client/conf/action-conf/hive.xml',
                               owner = 'oozie',
                               group = 'hadoop',
                               )

+ 4 - 4
ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py

@@ -109,23 +109,23 @@ class TestPigClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
-    self.assertResourceCalled('Directory', '/etc/pig/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/pig-client/conf',
                               recursive = True,
                               owner = 'hdfs',
                               group = 'hadoop'
     )
-    self.assertResourceCalled('File', '/etc/pig/conf/pig-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/pig-client/conf/pig-env.sh',
                               owner = 'hdfs',
                               mode=0755,
                               content = InlineTemplate(self.getConfig()['configurations']['pig-env']['content'])
     )
-    self.assertResourceCalled('File', '/etc/pig/conf/pig.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/pig-client/conf/pig.properties',
                               owner = 'hdfs',
                               group = 'hadoop',
                               mode = 0644,
                               content = 'pigproperties\nline2'
     )
-    self.assertResourceCalled('File', '/etc/pig/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/pig-client/conf/log4j.properties',
                               owner = 'hdfs',
                               group = 'hadoop',
                               mode = 0644,

+ 1 - 1
ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py

@@ -76,7 +76,7 @@ class TestServiceCheck(RMFTestCase):
                        content = StaticFile('zkSmoke.sh'),
                        mode = 0755,
     )
-    self.assertResourceCalled('Execute', '/tmp/zkSmoke.sh /usr/hdp/current/zookeeper-client/bin/zkCli.sh ambari-qa /etc/zookeeper/conf 2181 False /usr/bin/kinit no_keytab no_principal',
+    self.assertResourceCalled('Execute', '/tmp/zkSmoke.sh /usr/hdp/current/zookeeper-client/bin/zkCli.sh ambari-qa /usr/hdp/current/zookeeper-client/conf 2181 False /usr/bin/kinit no_keytab no_principal',
                        logoutput = True,
                        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                        tries = 3,

+ 3 - 2
ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py

@@ -35,6 +35,7 @@ class TestHookAfterInstall(RMFTestCase):
                               group = 'hadoop',
                               conf_dir = '/etc/hadoop/conf',
                               configurations = self.getConfig()['configurations']['core-site'],
-                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
-                              )
+                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+                              only_if="ls /etc/hadoop/conf")
+
     self.assertNoMoreResources()
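
The only_if argument added to this expected resource means the XmlConfig is applied only when the shell probe succeeds; a plain-Python sketch of that kind of guard (subprocess-based, function names illustrative):

import subprocess

def apply_resource(apply_fn, only_if=None):
    # skip the resource entirely when the probe command exits non-zero
    if only_if and subprocess.call(only_if, shell=True) != 0:
        return False
    apply_fn()
    return True

# apply_resource(write_core_site, only_if="ls /etc/hadoop/conf")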

+ 10 - 1
ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py

@@ -24,7 +24,16 @@ from resource_management import Hook
 
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeInstall(RMFTestCase):
-  def test_hook_default(self):
+  @patch("os.path.exists")
+  def test_hook_default(self, os_path_exists_mock):
+
+    def side_effect(path):
+      if path == "/etc/hadoop/conf":
+        return True
+      return False
+
+    os_path_exists_mock.side_effect = side_effect
+
     self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
                        classname="BeforeAnyHook",
                        command="hook",
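
A self-contained sketch of the mocking pattern used in this test: os.path.exists is patched with a side_effect so that only /etc/hadoop/conf appears to exist while the hook runs (uses the mock package, as the Ambari tests do; the asserted paths are illustrative):

import os
from mock import patch  # unittest.mock in Python 3

def fake_exists(path):
    # pretend only the hadoop conf directory is present on disk
    return path == "/etc/hadoop/conf"

with patch("os.path.exists", side_effect=fake_exists):
    assert os.path.exists("/etc/hadoop/conf")
    assert not os.path.exists("/etc/hbase/conf")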

+ 13 - 13
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_base.py

@@ -28,7 +28,7 @@ class TestStormBase(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "STORM/0.9.1.2.1/package"
   STACK_VERSION = "2.1"
 
-  def assert_configure_default(self):
+  def assert_configure_default(self, confDir="/etc/storm/conf"):
     import params
     self.assertResourceCalled('Directory', '/var/log/storm',
       owner = 'storm',
@@ -48,28 +48,28 @@ class TestStormBase(RMFTestCase):
       recursive = True,
       cd_access='a'
     )
-    self.assertResourceCalled('Directory', '/etc/storm/conf',
+    self.assertResourceCalled('Directory', confDir,
       group = 'hadoop',
       recursive = True,
       cd_access='a'
     )
-    self.assertResourceCalled('File', '/etc/storm/conf/config.yaml',
+    self.assertResourceCalled('File', confDir + '/config.yaml',
       owner = 'storm',
       content = Template('config.yaml.j2'),
       group = 'hadoop',
     )
     
-    storm_yarn_content = self.call_storm_template_and_assert()
+    storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
     
     self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder have to be substituted')
 
-    self.assertResourceCalled('File', '/etc/storm/conf/storm-env.sh',
+    self.assertResourceCalled('File', confDir + '/storm-env.sh',
                               owner = 'storm',
                               content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
                               )
     return storm_yarn_content
 
-  def assert_configure_secured(self):
+  def assert_configure_secured(self, confDir='/etc/storm/conf'):
     import params
     self.assertResourceCalled('Directory', '/var/log/storm',
       owner = 'storm',
@@ -89,36 +89,36 @@ class TestStormBase(RMFTestCase):
       recursive = True,
       cd_access='a'
     )
-    self.assertResourceCalled('Directory', '/etc/storm/conf',
+    self.assertResourceCalled('Directory', confDir,
       group = 'hadoop',
       recursive = True,
       cd_access='a'
     )
-    self.assertResourceCalled('File', '/etc/storm/conf/config.yaml',
+    self.assertResourceCalled('File', confDir + '/config.yaml',
       owner = 'storm',
       content = Template('config.yaml.j2'),
       group = 'hadoop',
     )
-    storm_yarn_content = self.call_storm_template_and_assert()
+    storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
     
     self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder have to be substituted')
     
-    self.assertResourceCalled('File', '/etc/storm/conf/storm-env.sh',
+    self.assertResourceCalled('File', confDir + '/storm-env.sh',
                               owner = 'storm',
                               content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
                               )
-    self.assertResourceCalled('TemplateConfig', '/etc/storm/conf/storm_jaas.conf',
+    self.assertResourceCalled('TemplateConfig', confDir + '/storm_jaas.conf',
       owner = 'storm',
     )
     return storm_yarn_content
 
-  def call_storm_template_and_assert(self):
+  def call_storm_template_and_assert(self, confDir="/etc/storm/conf"):
     import yaml_utils
 
     with RMFTestCase.env as env:
       storm_yarn_temlate = yaml_utils.yaml_config_template(self.getConfig()['configurations']['storm-site'])
 
-      self.assertResourceCalled('File', '/etc/storm/conf/storm.yaml',
+      self.assertResourceCalled('File', confDir + '/storm.yaml',
         owner = 'storm',
         content= storm_yarn_temlate,
         group = 'hadoop'

+ 4 - 4
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py

@@ -70,7 +70,7 @@ class TestStormJaasConfiguration(TestStormBase):
     self.assert_configure_secured()
 
   def assert_configure_default(self):
-    storm_yarn_content = super(TestStormJaasConfiguration, self).assert_configure_default()
+    storm_yarn_content = super(TestStormJaasConfiguration, self).assert_configure_default(confDir="/usr/hdp/current/storm-nimbus/conf")
     
     self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder have to be substituted')
       
@@ -80,11 +80,11 @@ class TestStormJaasConfiguration(TestStormBase):
 
   def assert_configure_secured(self):
 
-    storm_yarn_content = super(TestStormJaasConfiguration, self).assert_configure_secured()
-    self.assertResourceCalled('TemplateConfig', '/etc/storm/conf/client_jaas.conf',
+    storm_yarn_content = super(TestStormJaasConfiguration, self).assert_configure_secured(confDir="/usr/hdp/current/storm-nimbus/conf")
+    self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/storm-nimbus/conf/client_jaas.conf',
       owner = 'storm',
     )
-    self.assertResourceCalled('File', '/etc/storm/conf/worker-launcher.cfg',
+    self.assertResourceCalled('File', '/usr/hdp/current/storm-nimbus/conf/worker-launcher.cfg',
       owner = 'root',
       content = Template('worker-launcher.cfg.j2', min_user_ruid = 500),
       group = 'hadoop',
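
Together, the last two hunks parametrize the base-class assert helpers by conf dir so that stack-specific subclasses can redirect them; a minimal sketch of the pattern (class names and the returned path lists are illustrative):

class StormAssertBase(object):
    def assert_configure_default(self, conf_dir="/etc/storm/conf"):
        # build the expected paths from the conf dir handed in by the caller
        return [conf_dir + "/config.yaml",
                conf_dir + "/storm.yaml",
                conf_dir + "/storm-env.sh"]

class StormHdp22Asserts(StormAssertBase):
    def assert_configure_default(self):
        # HDP 2.2+ tests redirect everything to the versioned conf dir
        return super(StormHdp22Asserts, self).assert_configure_default(
            conf_dir="/usr/hdp/current/storm-nimbus/conf")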

+ 1 - 1
ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json

@@ -41,7 +41,7 @@
         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]", 
         "group_list": "[\"hadoop\",\"users\"]", 
         "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]", 
-        "stack_version": "2.2", 
+        "stack_version": "2.2",
         "stack_name": "HDP", 
         "db_name": "ambari", 
         "ambari_db_rca_driver": "org.postgresql.Driver", 

+ 2 - 2
ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py

@@ -50,7 +50,7 @@ class TestKafkaBroker(RMFTestCase):
                               cd_access = 'a'
     )
 
-    self.assertResourceCalled('Directory', '/etc/kafka/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/conf',
                               owner = 'kafka',
                               group = 'hadoop',
                               recursive = True,
@@ -86,7 +86,7 @@ class TestKafkaBroker(RMFTestCase):
                               cd_access = 'a'
     )
 
-    self.assertResourceCalled('Directory', '/etc/kafka/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/conf',
                               owner = 'kafka',
                               group = 'hadoop',
                               recursive = True,

The file diff has been suppressed because it is too large
+ 7 - 7
ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py


+ 6 - 6
ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py

@@ -38,11 +38,11 @@ class TestPigServiceCheck(RMFTestCase):
                        hdp_stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES
     )
-    self.assertResourceCalled("ExecuteHadoop", "dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ",
+    self.assertResourceCalled("ExecuteHadoop", "dfs -rmr pigsmoke.out passwd; hadoop --config /usr/hdp/current/hadoop-client/conf dfs -put /etc/passwd passwd ",
       try_sleep=5,
       tries=3,
       user="ambari-qa",
-      conf_dir="/etc/hadoop/conf",
+      conf_dir="/usr/hdp/current/hadoop-client/conf",
       security_enabled=True,
       principal="ambari-qa@EXAMPLE.COM",
       keytab="/etc/security/keytabs/smokeuser.headless.keytab",
@@ -65,15 +65,15 @@ class TestPigServiceCheck(RMFTestCase):
     self.assertResourceCalled("ExecuteHadoop", "fs -test -e pigsmoke.out",
       user="ambari-qa",
       bin_dir="/usr/hdp/current/hadoop-client/bin",
-      conf_dir="/etc/hadoop/conf"
+      conf_dir="/usr/hdp/current/hadoop-client/conf"
     )
 
     # Specific to HDP 2.2 and kerberized cluster
-    self.assertResourceCalled("ExecuteHadoop", "dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ",
+    self.assertResourceCalled("ExecuteHadoop", "dfs -rmr pigsmoke.out passwd; hadoop --config /usr/hdp/current/hadoop-client/conf dfs -put /etc/passwd passwd ",
       tries=3,
       try_sleep=5,
       user="ambari-qa",
-      conf_dir="/etc/hadoop/conf",
+      conf_dir="/usr/hdp/current/hadoop-client/conf",
       keytab="/etc/security/keytabs/smokeuser.headless.keytab",
       principal="ambari-qa@EXAMPLE.COM",
       security_enabled=True,
@@ -96,7 +96,7 @@ class TestPigServiceCheck(RMFTestCase):
     self.assertResourceCalled("ExecuteHadoop", "fs -test -e pigsmoke.out",
       user="ambari-qa",
       bin_dir="/usr/hdp/current/hadoop-client/bin",
-      conf_dir="/etc/hadoop/conf"
+      conf_dir="/usr/hdp/current/hadoop-client/conf"
     )
     self.assertNoMoreResources()
 

+ 4 - 4
ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py

@@ -124,10 +124,10 @@ class TestRangerAdmin(RMFTestCase):
         logoutput = True,
         environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
     )
-    self.assertResourceCalled('ModifyPropertiesFile', '/etc/ranger/admin/conf/xa_system.properties',
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/xa_system.properties',
         properties = self.getConfig()['configurations']['ranger-site'],
     )
-    self.assertResourceCalled('ModifyPropertiesFile', '/etc/ranger/admin/conf/ranger_webserver.properties',
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/ranger_webserver.properties',
         mode = 0744,
         properties = self.getConfig()['configurations']['ranger-site']
     )
@@ -153,10 +153,10 @@ class TestRangerAdmin(RMFTestCase):
         logoutput = True,
         environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
     )
-    self.assertResourceCalled('ModifyPropertiesFile', '/etc/ranger/admin/conf/xa_system.properties',
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/xa_system.properties',
         properties = self.getConfig()['configurations']['ranger-site'],
     )
-    self.assertResourceCalled('ModifyPropertiesFile', '/etc/ranger/admin/conf/ranger_webserver.properties',
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/conf/ranger_webserver.properties',
         mode = 0744,
         properties = self.getConfig()['configurations']['ranger-site']
     )

+ 4 - 4
ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py

@@ -36,17 +36,17 @@ class TestSliderClient(RMFTestCase):
     )
 
     self.assertResourceCalled('Directory',
-                              '/etc/slider/conf',
+                              '/usr/hdp/current/slider-client/conf',
                               recursive=True
     )
 
     self.assertResourceCalled('XmlConfig',
                               'slider-client.xml',
-                              conf_dir='/etc/slider/conf',
+                              conf_dir='/usr/hdp/current/slider-client/conf',
                               configurations=self.getConfig()['configurations']['slider-client']
     )
 
-    self.assertResourceCalled('File', '/etc/slider/conf/slider-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/slider-client/conf/slider-env.sh',
                               content = InlineTemplate(self.getConfig()['configurations']['slider-env']['content']),
                               mode = 0755,
                               )
@@ -62,7 +62,7 @@ class TestSliderClient(RMFTestCase):
                               )
 
     self.assertResourceCalled('File',
-                              '/etc/slider/conf/log4j.properties',
+                              '/usr/hdp/current/slider-client/conf/log4j.properties',
                               mode=0644,
                               content='log4jproperties\nline2'
     )

+ 12 - 12
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py

@@ -131,7 +131,7 @@ class TestJobHistoryServer(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', '/user/spark',
         security_enabled = False,
         keytab = UnknownConfigurationMock(),
-        conf_dir = '/etc/hadoop/conf',
+        conf_dir = '/usr/hdp/current/hadoop-client/conf',
         hdfs_user = 'hdfs',
         kinit_path_local = '/usr/bin/kinit',
         mode = 0775,
@@ -139,26 +139,26 @@ class TestJobHistoryServer(RMFTestCase):
         bin_dir = '/usr/hdp/current/hadoop-client/bin',
         action = ['create'],
     )
-    self.assertResourceCalled('PropertiesFile', '/etc/spark/conf/spark-defaults.conf',
+    self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         key_value_delimiter = ' ',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/spark-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
         content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
         content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/metrics.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
         content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/java-opts',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/java-opts',
         content = '  -Dhdp.version=2.3.0.0-1597',
         owner = 'spark',
         group = 'spark',
@@ -178,7 +178,7 @@ class TestJobHistoryServer(RMFTestCase):
     self.assertResourceCalled('HdfsDirectory', '/user/spark',
         security_enabled = True,
         keytab = UnknownConfigurationMock(),
-        conf_dir = '/etc/hadoop/conf',
+        conf_dir = '/usr/hdp/current/hadoop-client/conf',
         hdfs_user = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         mode = 0775,
@@ -186,26 +186,26 @@ class TestJobHistoryServer(RMFTestCase):
         bin_dir = '/usr/hdp/current/hadoop-client/bin',
         action = ['create'],
     )
-    self.assertResourceCalled('PropertiesFile', '/etc/spark/conf/spark-defaults.conf',
+    self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         key_value_delimiter = ' ',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/spark-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
         content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
         content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/metrics.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
         content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/java-opts',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/java-opts',
         content = '  -Dhdp.version=2.3.0.0-1597',
         owner = 'spark',
         group = 'spark',

+ 10 - 10
ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py

@@ -59,26 +59,26 @@ class TestSparkClient(RMFTestCase):
         group = 'hadoop',
         recursive = True,
     )
-    self.assertResourceCalled('PropertiesFile', '/etc/spark/conf/spark-defaults.conf',
+    self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         key_value_delimiter = ' ',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/spark-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
         content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
         content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/metrics.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
         content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/java-opts',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/java-opts',
         content = '  -Dhdp.version=2.3.0.0-1597',
         owner = 'spark',
         group = 'spark',
@@ -95,26 +95,26 @@ class TestSparkClient(RMFTestCase):
         group = 'hadoop',
         recursive = True,
     )
-    self.assertResourceCalled('PropertiesFile', '/etc/spark/conf/spark-defaults.conf',
+    self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         key_value_delimiter = ' ',
         properties = self.getConfig()['configurations']['spark-defaults'],
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/spark-env.sh',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
         content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
         content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/metrics.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
         content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
         owner = 'spark',
         group = 'spark',
     )
-    self.assertResourceCalled('File', '/etc/spark/conf/java-opts',
+    self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/java-opts',
         content = '  -Dhdp.version=2.3.0.0-1597',
         owner = 'spark',
         group = 'spark',

+ 2 - 2
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py

@@ -35,12 +35,12 @@ class TestMahoutClient(RMFTestCase):
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
-    self.assertResourceCalled('Directory', '/etc/mahout/conf',
+    self.assertResourceCalled('Directory', '/usr/hdp/current/mahout-client/conf',
                               owner = 'mahout',
                               group = 'hadoop',
                               recursive = True,
                               )
-    self.assertResourceCalled('File', '/etc/mahout/conf/log4j.properties',
+    self.assertResourceCalled('File', '/usr/hdp/current/mahout-client/conf/log4j.properties',
                               content = self.getConfig()['configurations']['mahout-log4j']['content'],
                               owner = 'mahout',
                               group = 'hadoop',

+ 5 - 5
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py

@@ -36,7 +36,7 @@ class TestMahoutClient(RMFTestCase):
     self.assertResourceCalled('ExecuteHadoop', 'fs -rm -r -f /user/ambari-qa/mahoutsmokeoutput /user/ambari-qa/mahoutsmokeinput',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               try_sleep = 5,
                               kinit_path_local = '/usr/bin/kinit',
                               tries = 3,
@@ -49,7 +49,7 @@ class TestMahoutClient(RMFTestCase):
                               tries = 3,
                               bin_dir = '/usr/hdp/current/hadoop-client/bin',
                               user = 'ambari-qa',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               )
     self.assertResourceCalled('File', '/tmp/sample-mahout-test.txt',
                               content = 'Test text which will be converted to sequence file.',
@@ -60,12 +60,12 @@ class TestMahoutClient(RMFTestCase):
                               tries = 3,
                               bin_dir = '/usr/hdp/current/hadoop-client/bin',
                               user = 'ambari-qa',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               )
     self.assertResourceCalled('Execute', 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/'
                                          'sample-mahout-test.txt --output /user/ambari-qa/mahoutsmokeoutput/ '
                                          '--charset utf-8',
-                              environment = {'HADOOP_CONF_DIR': '/etc/hadoop/conf',
+                              environment = {'HADOOP_CONF_DIR': '/usr/hdp/current/hadoop-client/conf',
                                              'HADOOP_HOME': '/usr/hdp/current/hadoop-client',
                                              'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
                                              'MAHOUT_HOME': '/usr/hdp/current/mahout-client'},
@@ -79,7 +79,7 @@ class TestMahoutClient(RMFTestCase):
                               tries = 10,
                               bin_dir = '/usr/hdp/current/hadoop-client/bin',
                               user = 'ambari-qa',
-                              conf_dir = '/etc/hadoop/conf',
+                              conf_dir = '/usr/hdp/current/hadoop-client/conf',
                               )
     self.assertNoMoreResources()
 

Some files were not shown because too many files changed in this diff
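Taken together, the hunks above all make the same substitution: test expectations move from the legacy /etc/&lt;service&gt;/conf locations to the versioned /usr/hdp/current/&lt;component&gt;/conf symlinks used by the newer HDP stack layout. The snippet below is a minimal, self-contained sketch of that selection logic for illustration only; the helper name, the lookup tables, and the 2.2 version cutoff are assumptions for the example and are not code from this commit.

```python
# Illustrative sketch only (not the commit's implementation): how a params or
# status_params script could resolve a component's conf dir once the versioned
# /usr/hdp/current/<component>/conf symlinks are available on HDP 2.2+.

LEGACY_CONF_DIRS = {
    "hadoop": "/etc/hadoop/conf",
    "spark": "/etc/spark/conf",
    "slider": "/etc/slider/conf",
    "mahout": "/etc/mahout/conf",
}

VERSIONED_CONF_DIRS = {
    "hadoop": "/usr/hdp/current/hadoop-client/conf",
    "spark": "/usr/hdp/current/spark-client/conf",
    "slider": "/usr/hdp/current/slider-client/conf",
    "mahout": "/usr/hdp/current/mahout-client/conf",
}

def conf_dir_for(component, hdp_stack_version):
    """Return the conf dir a component should use for the given stack version.

    Versions are compared as (major, minor) tuples; anything at or above
    2.2 gets the versioned /usr/hdp layout, older stacks keep /etc.
    """
    parts = (
        tuple(int(p) for p in hdp_stack_version.split(".")[:2])
        if hdp_stack_version
        else (0, 0)
    )
    if parts >= (2, 2):
        return VERSIONED_CONF_DIRS[component]
    return LEGACY_CONF_DIRS[component]

# Example: the expectations asserted in the HDP 2.2/2.3 tests above.
assert conf_dir_for("spark", "2.3.0.0") == "/usr/hdp/current/spark-client/conf"
assert conf_dir_for("hadoop", "2.0.6.0") == "/etc/hadoop/conf"
```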