
AMBARI-11347. Optimize creating hdfs resources by using Webhdfs instead of hadoop fs jar calls (aonishuk)

Andrew Onishuk 10 years ago
parent
commit
aa51bd75b8
41 changed files with 931 additions and 184 deletions
  1. 31 0
      ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
  2. 117 0
      ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
  3. 320 32
      ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
  4. 5 0
      ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
  5. 34 0
      ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
  6. 11 1
      ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
  7. 10 1
      ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
  8. 10 1
      ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
  9. 10 1
      ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
  10. 2 2
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
  11. 4 2
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
  12. 1 1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
  13. 9 1
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  14. 0 11
      ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
  15. 10 1
      ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
  16. 10 1
      ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
  17. 9 4
      ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
  18. 9 1
      ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
  19. 4 4
      ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
  20. 7 2
      ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
  21. 11 1
      ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
  22. 11 1
      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
  23. 2 2
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
  24. 2 2
      ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
  25. 15 6
      ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  26. 83 16
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  27. 13 1
      ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
  28. 14 14
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  29. 24 6
      ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
  30. 30 12
      ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
  31. 5 5
      ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
  32. 32 10
      ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
  33. 11 11
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
  34. 6 6
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
  35. 3 3
      ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
  36. 3 3
      ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
  37. 3 3
      ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
  38. 32 10
      ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
  39. 13 1
      ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
  40. 4 4
      ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
  41. 1 1
      ambari-server/src/test/python/stacks/utils/RMFTestCase.py

+ 31 - 0
ambari-common/src/main/python/resource_management/libraries/functions/jmx.py

@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import urllib2
+import json
+
+def get_value_from_jmx(qry, property):
+  try:
+    response = urllib2.urlopen(qry)
+    data = response.read()
+    if data:
+      data_dict = json.loads(data)
+      return data_dict["beans"][0][property]
+  except Exception:
+    # JMX endpoint unreachable, non-JSON response, or missing bean/property:
+    # callers treat None as "state unknown".
+    return None
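
For context, a minimal sketch of how this helper is called elsewhere in this commit (the host and port are hypothetical):

  from resource_management.libraries.functions.jmx import get_value_from_jmx

  # Read the 'State' property of the NameNodeStatus bean; returns 'active',
  # 'standby', or None if the endpoint cannot be queried.
  qry = "http://c6401.ambari.apache.org:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
  state = get_value_from_jmx(qry, "State")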

+ 117 - 0
ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py

@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from resource_management.libraries.script import UnknownConfiguration
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.jmx import get_value_from_jmx
+from resource_management.core.base import Fail
+__all__ = ["get_namenode_states", "get_active_namenode", "get_property_for_active_namenode"]
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
+NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
+JMX_URI_FRAGMENT = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+  
+def get_namenode_states(hdfs_site):
+  """
+  return format: [('nn1', 'hostname1:port1'), ('nn2', 'hostname2:port2')], [...], [...] for active, standby and unknown namenodes respectively
+  """
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+  
+  name_service = hdfs_site['dfs.nameservices']
+  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+
+  # now we have something like 'nn1,nn2,nn3,nn4'
+  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+  # ie dfs.namenode.http-address.hacluster.nn1
+  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+  for nn_unique_id in nn_unique_ids:
+    is_https_enabled = hdfs_site['dfs.https.enabled'] if not is_empty(hdfs_site['dfs.https.enabled']) else False
+    
+    if not is_https_enabled:
+      key = NAMENODE_HTTP_FRAGMENT.format(name_service,nn_unique_id)
+    else:
+      key = "https://" + NAMENODE_HTTPS_FRAGMENT.format(name_service,nn_unique_id)
+
+    if key in hdfs_site:
+      # use str() to ensure that unicode strings do not have the u' in them
+      value = str(hdfs_site[key])
+
+      jmx_uri = JMX_URI_FRAGMENT.format("https" if is_https_enabled else "http", value)
+      state = get_value_from_jmx(jmx_uri,'State')
+      
+      if state == HDFS_NN_STATE_ACTIVE:
+        active_namenodes.append((nn_unique_id, value))
+      elif state == HDFS_NN_STATE_STANDBY:
+        standby_namenodes.append((nn_unique_id, value))
+      else:
+        unknown_namenodes.append((nn_unique_id, value))
+        
+  return active_namenodes, standby_namenodes, unknown_namenodes
+
+def is_ha_enabled(hdfs_site):
+  dfs_ha_nameservices = hdfs_site['dfs.nameservices']
+  
+  if is_empty(dfs_ha_nameservices):
+    return False
+  
+  dfs_ha_namenode_ids = hdfs_site[format("dfs.ha.namenodes.{dfs_ha_nameservices}")]
+  
+  if not is_empty(dfs_ha_namenode_ids):
+    dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+    if len(dfs_ha_namenode_ids_list) > 1:
+      return True
+      
+  return False
+
+def get_active_namenode(hdfs_site):
+  """
+  return format is the nn_unique_id and its address: ('nn1', 'hostname1:port1')
+  """
+  active_namenodes = get_namenode_states(hdfs_site)[0]
+  if active_namenodes:
+    return active_namenodes[0]
+  else:
+    return UnknownConfiguration('fs_root')
+  
+def get_property_for_active_namenode(hdfs_site, property_name):
+  """
+  For dfs.namenode.rpc-address:
+    - In non-ha mode it will return hdfs_site[dfs.namenode.rpc-address]
+    - In ha-mode it will return hdfs_site[dfs.namenode.rpc-address.nnha.nn2], where nnha is the name of HA, and nn2 is id of active NN
+  """
+  if is_ha_enabled(hdfs_site):
+    name_service = hdfs_site['dfs.nameservices']
+    active_namenodes = get_namenode_states(hdfs_site)[0]
+    
+    if not active_namenodes:
+      raise Fail("There is no active namenode.")
+    
+    active_namenode_id = active_namenodes[0][0]
+    
+    return hdfs_site[format("{property_name}.{name_service}.{active_namenode_id}")]
+  else:
+    return hdfs_site[property_name]
+  
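
A hedged usage sketch of these helpers, with a hypothetical two-NameNode hdfs-site dict (all hostnames and ports are illustrative):

  from resource_management.libraries.functions.namenode_ha_utils import (
      get_namenode_states, get_property_for_active_namenode)

  # Minimal hdfs-site for an HA cluster named 'hacluster'.
  hdfs_site = {
      'dfs.nameservices': 'hacluster',
      'dfs.ha.namenodes.hacluster': 'nn1,nn2',
      'dfs.https.enabled': False,
      'dfs.namenode.http-address.hacluster.nn1': 'c6401.ambari.apache.org:50070',
      'dfs.namenode.http-address.hacluster.nn2': 'c6402.ambari.apache.org:50070',
      'dfs.namenode.rpc-address.hacluster.nn1': 'c6401.ambari.apache.org:8020',
      'dfs.namenode.rpc-address.hacluster.nn2': 'c6402.ambari.apache.org:8020',
  }

  # Queries each NameNode's JMX endpoint, so it only works where those hosts are reachable.
  active, standby, unknown = get_namenode_states(hdfs_site)
  rpc = get_property_for_active_namenode(hdfs_site, 'dfs.namenode.rpc-address')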

+ 320 - 32
ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py

@@ -20,13 +20,18 @@ Ambari Agent
 
 """
 import json
+import re
+import os
 from resource_management.core.environment import Environment
 from resource_management.core.base import Fail
 from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import File
 from resource_management.core.providers import Provider
 from resource_management.core.logger import Logger
+from resource_management.core import shell
 from resource_management.libraries.functions import format
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import namenode_ha_utils
 
 
 JSON_PATH = '/var/lib/ambari-agent/data/hdfs_resources.json'
@@ -45,61 +50,57 @@ RESOURCE_TO_JSON_FIELDS = {
   'change_permissions_for_parents': 'changePermissionforParents'
 }
 
-
-class HdfsResourceProvider(Provider):
-  def action_delayed(self, action_name):
+class HdfsResourceJar:
+  """
+  This is slower than the HdfsResourceWebHDFS implementation of HdfsResource, but it works in all cases and on any DFS type.
+  
+  The idea is to put all the files/directories/copyFromLocals we have to create/delete into a json file.
+  And then process it with ONLY ONE expensive hadoop call to our custom jar fast-hdfs-resource.jar, which reads this json.
+  
+  'create_on_execute' and 'delete_on_execute' do nothing but add files/directories to this json,
+  while 'execute' does all the expensive creating/deleting work, executing the jar with the json as a parameter.
+  """
+  def action_delayed(self, action_name, main_resource):
     resource = {}
     env = Environment.get_instance()
     if not 'hdfs_files' in env.config:
       env.config['hdfs_files'] = []
 
-    # Check required parameters
-    if not self.resource.type or not self.resource.action:
-      raise Fail("Resource parameter type or action is not set.")
-
     # Put values in dictionary-resource
     for field_name, json_field_name in RESOURCE_TO_JSON_FIELDS.iteritems():
       if field_name == 'action':
         resource[json_field_name] = action_name
-      elif field_name == 'mode' and self.resource.mode:
-        resource[json_field_name] = oct(self.resource.mode)[1:]
-      elif getattr(self.resource, field_name):
-        resource[json_field_name] = getattr(self.resource, field_name)
+      elif field_name == 'mode' and main_resource.resource.mode:
+        resource[json_field_name] = oct(main_resource.resource.mode)[1:]
+      elif getattr(main_resource.resource, field_name):
+        resource[json_field_name] = getattr(main_resource.resource, field_name)
 
     # Add resource to create
     env.config['hdfs_files'].append(resource)
-
-  def action_create_on_execute(self):
-    self.action_delayed("create")
-
-  def action_delete_on_execute(self):
-    self.action_delayed("delete")
-
-  def action_execute(self):
+    
+  def action_execute(self, main_resource):
     env = Environment.get_instance()
 
     # Check required parameters
-    if not self.resource.user:
-      raise Fail("Resource parameter 'user' is not set.")
+    main_resource.assert_parameter_is_set('user')
 
     if not 'hdfs_files' in env.config or not env.config['hdfs_files']:
       Logger.info("No resources to create. 'create_on_execute' or 'delete_on_execute' wasn't triggered before this 'execute' action.")
       return
-
-    hadoop_bin_dir = self.resource.hadoop_bin_dir
-    hadoop_conf_dir = self.resource.hadoop_conf_dir
-    user = self.resource.user
-    security_enabled = self.resource.security_enabled
-    keytab_file = self.resource.keytab
-    kinit_path = self.resource.kinit_path_local
-    logoutput = self.resource.logoutput
+    
+    hadoop_bin_dir = main_resource.resource.hadoop_bin_dir
+    hadoop_conf_dir = main_resource.resource.hadoop_conf_dir
+    user = main_resource.resource.user
+    security_enabled = main_resource.resource.security_enabled
+    keytab_file = main_resource.resource.keytab
+    kinit_path = main_resource.resource.kinit_path_local
+    logoutput = main_resource.resource.logoutput
+    principal_name = main_resource.resource.principal_name
     jar_path=JAR_PATH
     json_path=JSON_PATH
 
     if security_enabled:
-      Execute(format("{kinit_path} -kt {keytab_file} {hdfs_principal_name}"),
-              user=user
-      )
+      main_resource.kinit()
 
     # Write json file to disk
     File(JSON_PATH,
@@ -116,3 +117,290 @@ class HdfsResourceProvider(Provider):
 
     # Clean
     env.config['hdfs_files'] = []
+
+class WebHDFSUtil:
+  def __init__(self, address, run_user, logoutput, security_enabled, kinit_function):
+    self.address = address
+    self.run_user = run_user
+    self.logoutput = logoutput
+    self.security_enabled = security_enabled
+    self.kinit_function = kinit_function
+    
+  def parse_path(self, path):
+    """
+    hdfs://nn_url:1234/a/b/c -> /a/b/c
+    hdfs://nn_ha_name/a/b/c -> /a/b/c
+    hdfs:///a/b/c -> /a/b/c
+    /a/b/c -> /a/b/c
+    """
+    match_with_protocol_and_nn_url = re.match("[a-zA-Z]+://[^/]+(/.+)", path)
+    match_with_protocol = re.match("[a-zA-Z]+://(/.+)", path)
+    
+    if match_with_protocol_and_nn_url:
+      path = match_with_protocol_and_nn_url.group(1)
+    elif match_with_protocol:
+      path = match_with_protocol.group(1)
+      
+    return re.sub("[/]+", "/", path)
+    
+  valid_status_codes = ["200", "201", "500"]
+  def run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
+    """
+    assertable_result - some POST requests return '{"boolean":false}' or '{"boolean":true}'
+    depending on whether the query was successful; for those we can assert on the result
+    """
+    target = self.parse_path(target)
+    
+    url = format("{address}/webhdfs/v1{target}?op={operation}&user.name={run_user}", address=self.address, run_user=self.run_user)
+    for k,v in kwargs.iteritems():
+      url = format("{url}&{k}={v}")
+    
+    if file_to_put and not os.path.exists(file_to_put):
+      raise Fail(format("File {file_to_put} is not found."))
+    
+    cmd = ["curl", "-L", "-w", "%{http_code}", "-X", method]
+    
+    if file_to_put:
+      cmd += ["-T", file_to_put]
+    if self.security_enabled:
+      self.kinit_function()
+      cmd += ["--negotiate", "-u", ":"]
+      
+    cmd.append(url)
+    _, out = shell.checked_call(cmd, user=self.run_user, logoutput=self.logoutput, quiet=False)
+    status_code = out[-3:]
+    out = out[:-3] # strip the trailing status code from the output
+    
+    try:
+      result_dict = json.loads(out)
+    except ValueError:
+      result_dict = out
+          
+    if status_code not in WebHDFSUtil.valid_status_codes + ignore_status_codes or (assertable_result and not result_dict['boolean']):
+      formatted_output = json.dumps(result_dict, indent=2) if isinstance(result_dict, dict) else result_dict
+      err_msg = "Execution of '%s' returned status_code=%s. %s" % (shell.string_cmd_from_args_list(cmd), status_code, formatted_output)
+      raise Fail(err_msg)
+    
+    return result_dict
+    
+class HdfsResourceWebHDFS:
+  """
+  This is the fastest implementation of HdfsResource, using WebHDFS.
+  Since WebHDFS is not available on non-HDFS filesystems and can also be disabled within HDFS itself,
+  we should still keep the other implementations for such cases.
+  """
+  def action_execute(self, main_resource):
+    pass
+  
+  def _assert_valid(self):
+    source = self.main_resource.resource.source
+    type = self.main_resource.resource.type
+    target = self.main_resource.resource.target
+    
+    if source:
+      if not os.path.exists(source):
+        raise Fail(format("Source {source} doesn't exist"))
+      if type == "directory" and os.path.isfile(source):
+        raise Fail(format("Source {source} is file but type is {type}"))
+      elif type == "file" and os.path.isdir(source): 
+        raise Fail(format("Source {source} is directory but type is {type}"))
+    
+    self.target_status = self._get_file_status(target)
+    
+    if self.target_status and self.target_status['type'].lower() != type:
+      raise Fail(format("Trying to create file/directory but directory/file exists in the DFS on {target}"))
+    
+  def action_delayed(self, action_name, main_resource):
+    main_resource.assert_parameter_is_set('user')
+    
+    address = main_resource.https_nn_address if main_resource.is_https_enabled else main_resource.http_nn_address
+    protocol = "https" if main_resource.is_https_enabled else "http"
+    
+    self.util = WebHDFSUtil(format("{protocol}://{address}"), main_resource.resource.user, 
+                            main_resource.resource.logoutput, main_resource.resource.security_enabled,
+                            main_resource.kinit)
+    self.mode = oct(main_resource.resource.mode)[1:] if main_resource.resource.mode else main_resource.resource.mode
+    self.mode_set = False
+    self.main_resource = main_resource
+    self._assert_valid()
+        
+    if action_name == "create":
+      self._create_resource()
+      self._set_mode(self.target_status)
+      self._set_owner(self.target_status)
+    else:
+      self._delete_resource()
+    
+  def _create_resource(self):
+    is_create = (self.main_resource.resource.source == None)
+    
+    if is_create and self.main_resource.resource.type == "directory":
+      self._create_directory(self.main_resource.resource.target)
+    elif is_create and self.main_resource.resource.type == "file":
+      self._create_file(self.main_resource.resource.target, mode=self.mode)
+    elif not is_create and self.main_resource.resource.type == "file":
+      self._create_file(self.main_resource.resource.target, source=self.main_resource.resource.source, mode=self.mode)
+    elif not is_create and self.main_resource.resource.type == "directory":
+      self._create_directory(self.main_resource.resource.target)
+      self._copy_from_local_directory(self.main_resource.resource.target, self.main_resource.resource.source)
+    
+  def _copy_from_local_directory(self, target, source):
+    for next_path_part in os.listdir(source):
+      new_source = os.path.join(source, next_path_part)
+      new_target = format("{target}/{next_path_part}")
+      if os.path.isdir(new_source):
+        Logger.info(format("Creating DFS directory {new_target}"))
+        self._create_directory(new_target)
+        self._copy_from_local_directory(new_target, new_source)
+      else:
+        self._create_file(new_target, new_source)
+  
+  def _create_directory(self, target):
+    if target == self.main_resource.resource.target and self.target_status:
+      return
+    
+    self.util.run_command(target, 'MKDIRS', method='PUT')
+    
+  def _get_file_status(self, target):
+    list_status = self.util.run_command(target, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+    return list_status['FileStatus'] if 'FileStatus' in list_status else None
+    
+  def _create_file(self, target, source=None, mode=""):
+    """
+    The PUT file command is slow, but _get_file_status is pretty fast,
+    so we should check whether the file really needs to be put before doing it.
+    """
+    file_status = self._get_file_status(target) if target!=self.main_resource.resource.target else self.target_status
+    mode = "" if not mode else mode
+    
+    if file_status:
+      if source:
+        length = file_status['length']
+        local_file_size = os.stat(source).st_size # TODO: os -> sudo
+        
+        # TODO: re-implement this using checksums
+        if local_file_size == length:
+          Logger.info(format("DFS file {target} is identical to {source}, skipping the copying"))
+          return
+      else:
+        Logger.info(format("File {target} already exists in DFS, skipping the creation"))
+        return
+    
+    Logger.info(format("Creating new file {target} in DFS"))
+    kwargs = {'permission': mode} if mode else {}
+      
+    self.util.run_command(target, 'CREATE', method='PUT', overwrite=True, assertable_result=False, file_to_put=source, **kwargs)
+    
+    if mode and file_status:
+      file_status['permission'] = mode
+    
+     
+  def _delete_resource(self):
+    if not self.target_status:
+      return
+    self.util.run_command(self.main_resource.resource.target, 'DELETE', method='DELETE', recursive=True)
+
+  def _set_owner(self, file_status=None):
+    owner = "" if not self.main_resource.resource.owner else self.main_resource.resource.owner
+    group = "" if not self.main_resource.resource.group else self.main_resource.resource.group
+    
+    if (not owner or file_status and file_status['owner'] == owner) and (not group or file_status and file_status['group'] == group):
+      return
+    
+    self.util.run_command(self.main_resource.resource.target, 'SETOWNER', method='PUT', owner=owner, group=group, assertable_result=False)
+    
+    results = []
+    
+    if self.main_resource.resource.recursive_chown:
+      self._fill_directories_list(self.main_resource.resource.target, results)
+    if self.main_resource.resource.change_permissions_for_parents:
+      self._fill_in_parent_directories(self.main_resource.resource.target, results)
+      
+    for path in results:
+      self.util.run_command(path, 'SETOWNER', method='PUT', owner=owner, group=group, assertable_result=False)
+  
+  def _set_mode(self, file_status=None):
+    if not self.mode or file_status and file_status['permission'] == self.mode:
+      return
+    
+    if not self.mode_set:
+      self.util.run_command(self.main_resource.resource.target, 'SETPERMISSION', method='PUT', permission=self.mode, assertable_result=False)
+    
+    results = []
+    
+    if self.main_resource.resource.recursive_chmod:
+      self._fill_directories_list(self.main_resource.resource.target, results)
+    if self.main_resource.resource.change_permissions_for_parents:
+      self._fill_in_parent_directories(self.main_resource.resource.target, results)
+      
+    for path in results:
+      self.util.run_command(path, 'SETPERMISSION', method='PUT', permission=self.mode, assertable_result=False)
+    
+    
+  def _fill_in_parent_directories(self, target, results):
+    path_parts = self.util.parse_path(target).split("/") 
+    path = "/"
+    
+    for path_part in path_parts:
+      if path_part:
+        path += path_part + "/"
+        results.append(path)
+      
+  def _fill_directories_list(self, target, results):
+    list_status = self.util.run_command(target, 'LISTSTATUS', method='GET', assertable_result=False)['FileStatuses']['FileStatus']
+    
+    for file_status in list_status:
+      if file_status['pathSuffix']:
+        new_path = target + "/" + file_status['pathSuffix']
+        results.append(new_path)
+        
+        if file_status['type'] == 'DIRECTORY':
+          self._fill_directories_list(new_path, results)
+    
+class HdfsResourceProvider(Provider):
+  def __init__(self, resource):
+    super(HdfsResourceProvider,self).__init__(resource)
+    self.assert_parameter_is_set('hdfs_site')
+    
+    self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
+    self.is_https_enabled = self.resource.hdfs_site['dfs.https.enabled'] if not is_empty(self.resource.hdfs_site['dfs.https.enabled']) else False
+    self.https_nn_address = namenode_ha_utils.get_property_for_active_namenode(self.resource.hdfs_site, 'dfs.namenode.https-address')
+    self.http_nn_address = namenode_ha_utils.get_property_for_active_namenode(self.resource.hdfs_site, 'dfs.namenode.http-address')
+    
+  def action_delayed(self, action_name):
+    self.assert_parameter_is_set('type')
+
+    self.get_hdfs_resource_executor().action_delayed(action_name, self)
+
+  def action_create_on_execute(self):
+    self.action_delayed("create")
+
+  def action_delete_on_execute(self):
+    self.action_delayed("delete")
+
+  def action_execute(self):
+    self.get_hdfs_resource_executor().action_execute(self)
+
+  def get_hdfs_resource_executor(self):
+    # only hdfs seems to support webHDFS
+    if self.webhdfs_enabled and self.resource.default_fs.startswith("hdfs"):
+      return HdfsResourceWebHDFS()
+    else:
+      return HdfsResourceJar()
+  
+  def assert_parameter_is_set(self, parameter_name):
+    if not getattr(self.resource, parameter_name):
+      raise Fail("Resource parameter '{0}' is not set.".format(parameter_name))
+    return True
+  
+  def kinit(self):
+    keytab_file = self.resource.keytab
+    kinit_path = self.resource.kinit_path_local
+    principal_name = self.resource.principal_name
+    user = self.resource.user
+    
+    Execute(format("{kinit_path} -kt {keytab_file} {principal_name}"),
+            user=user
+    )    
+
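
Under the hood, WebHDFSUtil.run_command shells out to curl against the WebHDFS REST API. A sketch of the command it builds for a directory creation on an unsecured cluster (host, port and path are hypothetical):

  # Equivalent of self.util.run_command('/tmp/test_dir', 'MKDIRS', method='PUT')
  # run as user 'hdfs'; '-w %{http_code}' appends the HTTP status code that
  # run_command slices off the end of the output.
  cmd = ["curl", "-L", "-w", "%{http_code}", "-X", "PUT",
         "http://c6401.ambari.apache.org:50070/webhdfs/v1/tmp/test_dir?op=MKDIRS&user.name=hdfs"]

On a Kerberized cluster, run_command first kinits and adds '--negotiate -u :' to the same command.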

+ 5 - 0
ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py

@@ -65,11 +65,16 @@ class HdfsResource(Resource):
   change_permissions_for_parents = BooleanArgument(default=False)
 
   security_enabled = BooleanArgument(default=False)
+  principal_name = ResourceArgument()
   keytab = ResourceArgument()
   kinit_path_local = ResourceArgument()
   user = ResourceArgument()
   hadoop_bin_dir = ResourceArgument()
   hadoop_conf_dir = ResourceArgument()
+  
+  # WebHDFS needs these
+  hdfs_site = ResourceArgument()
+  default_fs = ResourceArgument()
 
   #action 'execute' immediately creates all pending files/directories in efficient manner
   #action 'create_delayed/delete_delayed' adds file/directory to list of pending directories
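
A hedged sketch of how a service script declares resources against this interface (paths and values are illustrative):

  params.HdfsResource("/user/ambari-qa",
                      type="directory",
                      action="create_on_execute",
                      owner="ambari-qa",
                      mode=0770)
  # One call flushes everything queued above; with hdfs_site/default_fs bound
  # (see the params.py changes below) the provider picks the WebHDFS
  # implementation when dfs.webhdfs.enabled is set on an hdfs:// filesystem.
  params.HdfsResource(None, action="execute")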

+ 34 - 0
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java

@@ -879,6 +879,40 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
   protected void addMissingConfigs() throws AmbariException {
     updateHiveConfigs();
+    updateHdfsConfigs();
+  }
+  
+  protected void updateHdfsConfigs() throws AmbariException {
+    /***
+     * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS from hadoop-env.sh
+     */
+    AmbariManagementController ambariManagementController = injector.getInstance(
+        AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      Map<String, String> prop = new HashMap<String, String>();
+      String content = null;
+
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          content = null;
+          if (cluster.getDesiredConfigByType("hadoop-env") != null) {
+            content = cluster.getDesiredConfigByType(
+                "hadoop-env").getProperties().get("content");
+          }
+
+          if (content != null) {
+            content += "\nexport HADOOP_NAMENODE_OPTS=\"${HADOOP_NAMENODE_OPTS} -Dorg.mortbay.jetty.Request.maxFormContentSize=-1\"";
+
+            prop.put("content", content);
+            updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
+                prop, true, false);
+          }
+        }
+      }
+    }
   }
 
   protected void updateHiveConfigs() throws AmbariException {
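
The upgrade step amounts to appending one export line to the stored hadoop-env 'content' property; a Python sketch of the same transformation (the desired_configs dict is hypothetical, standing in for the cluster's desired-config lookup):

  content = desired_configs.get("hadoop-env", {}).get("content")
  if content is not None:
      content += ('\nexport HADOOP_NAMENODE_OPTS="${HADOOP_NAMENODE_OPTS} '
                  '-Dorg.mortbay.jetty.Request.maxFormContentSize=-1"')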

+ 11 - 1
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py

@@ -25,6 +25,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.script.script import Script
 
 
+
 import status_params
 
 # server configurations
@@ -148,6 +149,12 @@ hostname = status_params.hostname
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+# dfs.namenode.https-address
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -158,5 +165,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
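
This functools.partial idiom, repeated in each service's params file below, pre-binds the cluster-wide arguments so call sites only pass per-resource ones. A self-contained sketch of the pattern (stub function and values are illustrative):

  import functools

  def hdfs_resource(target, user=None, hdfs_site=None, default_fs=None):
      return (target, user, hdfs_site, default_fs)

  # Bind the arguments shared by every call in this service's scripts.
  HdfsResource = functools.partial(hdfs_resource,
                                   user="hdfs",
                                   hdfs_site={"dfs.webhdfs.enabled": True},
                                   default_fs="hdfs://c6401.ambari.apache.org:8020")

  HdfsResource("/tmp/example")   # only per-call arguments remain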

+ 10 - 1
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py

@@ -23,6 +23,7 @@ from resource_management import *
 import status_params
 from ambari_commons import OSCheck
 
+
 if OSCheck.is_windows_family():
   from params_windows import *
 else:
@@ -184,6 +185,11 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -194,7 +200,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 
 

+ 10 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py

@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 config = Script.get_config()
 
 stack_name = default("/hostLevelParams/stack_name", None)
@@ -106,6 +107,11 @@ supports_hive_dr = config['configurations']['falcon-env']['supports_hive_dr']
 local_data_mirroring_dir = "/usr/hdp/current/falcon-server/data-mirroring"
 dfs_data_mirroring_dir = "/apps/data-mirroring"
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -116,6 +122,9 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 

+ 10 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -34,6 +34,7 @@ from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.script.script import Script
 
+
 from resource_management.libraries.functions.substitute_vars import substitute_vars
 
 # server configurations
@@ -172,6 +173,11 @@ hostname = config["hostname"]
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -182,7 +188,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 # ranger host

+ 2 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml

@@ -194,13 +194,13 @@ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 # Command specific options appended to HADOOP_OPTS when specified
 
 {% if java_version &lt; 8 %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 {% else %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)

+ 4 - 2
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py

@@ -166,13 +166,15 @@ def create_hdfs_directories(check):
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
-                       mode=0777
+                       mode=0777,
+                       only_if=check
   )
   params.HdfsResource(params.smoke_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
+                       mode=params.smoke_hdfs_user_mode,
+                       only_if=check
   )
   params.HdfsResource(None, 
                       action="execute",

+ 1 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py

@@ -20,7 +20,7 @@ limitations under the License.
 
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.default import default
-from utils import get_value_from_jmx
+from resource_management.libraries.functions.jmx import get_value_from_jmx
 
 
 class NAMENODE_STATE:

+ 9 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -37,6 +37,7 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 
+
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
@@ -292,6 +293,10 @@ else:
   dn_kinit_cmd = ""
   nn_kinit_cmd = ""
   jn_kinit_cmd = ""
+  
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 
 import functools
 #create partial functions with common arguments for every HdfsResource call
@@ -303,7 +308,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 

+ 0 - 11
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py

@@ -256,17 +256,6 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
          action="delete",
     )
 
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data = response.read()
-    if data:
-      data_dict = json.loads(data)
-      return data_dict["beans"][0][property]
-  except:
-    return None
-
 def get_jmx_data(nn_address, modeler_type, metric, encrypted=False):
   """
   :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.

+ 10 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py

@@ -36,6 +36,7 @@ from resource_management.libraries.functions.get_port_from_url import get_port_f
 from resource_management.libraries import functions
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -368,6 +369,11 @@ webhcat_hdfs_user_mode = 0755
 #for create_hdfs_directory
 security_param = "true" if security_enabled else "false"
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -378,7 +384,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 
 

+ 10 - 1
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py

@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -68,6 +69,11 @@ java64_home = config['hostLevelParams']['java_home']
 
 log4j_props = config['configurations']['mahout-log4j']['content']
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -78,5 +84,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )

+ 9 - 4
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -193,9 +193,12 @@ oozie_hdfs_user_mode = 0775
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 import functools
 #create partial functions with common arguments for every HdfsResource call
-#to create hdfs directory we need to call params.HdfsResource in code
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
@@ -203,9 +206,11 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
- )
-
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
 
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)

+ 9 - 1
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py

@@ -69,6 +69,11 @@ pig_properties = config['configurations']['pig-properties']['content']
 
 log4j_props = config['configurations']['pig-log4j']['content']
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -79,6 +84,9 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 

+ 4 - 4
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py

@@ -47,13 +47,13 @@ class PigServiceCheckLinux(PigServiceCheck):
     params.HdfsResource(output_dir,
                         type="directory",
                         action="delete_on_execute",
-                        user=params.smokeuser,
+                        owner=params.smokeuser,
                         )
     params.HdfsResource(input_file,
                         type="file",
                         source="/etc/passwd",
                         action="create_on_execute",
-                        user=params.smokeuser,
+                        owner=params.smokeuser,
     )
     params.HdfsResource(None, action="execute")
  
@@ -85,13 +85,13 @@ class PigServiceCheckLinux(PigServiceCheck):
       params.HdfsResource(output_dir,
                           type="directory",
                           action="delete_on_execute",
-                          user=params.smokeuser,
+                          owner=params.smokeuser,
       )
       params.HdfsResource(input_file,
                           type="file",
                           source="/etc/passwd",
                           action="create_on_execute",
-                          user=params.smokeuser,
+                          owner=params.smokeuser,
       )
 
       # Check for Pig-on-Tez

+ 7 - 2
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py

@@ -30,6 +30,7 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+
 from resource_management.libraries.script.script import Script
 
 
@@ -152,7 +153,8 @@ if security_enabled:
       'hive.server2.enable.doAs': str(config['configurations']['hive-site']['hive.server2.enable.doAs']).lower()
     })
   
-
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+hdfs_site = config['configurations']['hdfs-site']
 
 
 import functools
@@ -165,5 +167,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )

+ 11 - 1
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py

@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -73,6 +74,12 @@ tez_user = config['configurations']['tez-env']['tez_user']
 user_group = config['configurations']['cluster-env']['user_group']
 tez_env_sh_template = config['configurations']['tez-env']['content']
 
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
@@ -83,7 +90,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 

+ 11 - 1
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py

@@ -29,6 +29,7 @@ from resource_management.libraries.functions.version import format_hdp_stack_ver
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 
+
 import status_params
 
 # a map of the Ambari role to the component name
@@ -223,6 +224,12 @@ tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
 #for create_hdfs_directory
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -233,7 +240,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -79,14 +79,14 @@ HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 
 {% if java_version &lt; 8 %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 
 {% else %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS} -Dorg.mortbay.jetty.Request.maxFormContentSize=-1"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml

@@ -62,7 +62,7 @@ HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logge
 
 {% if java_version &lt; 8 %}
 SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
@@ -72,7 +72,7 @@ export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_
 
 {% else %}
 SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
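
The `-Dorg.mortbay.jetty.Request.maxFormContentSize=-1` flag added to HADOOP_NAMENODE_OPTS in both branches lifts the form-content cap of the NameNode's embedded Jetty 6 (`org.mortbay`) HTTP server, presumably so large WebHDFS requests from the new provider are not rejected. Below is a minimal sketch of the two-step WebHDFS file create those requests follow, assuming an unsecured cluster, the default NameNode HTTP port 50070, and the third-party `requests` library; it is illustrative only, not the provider's implementation.

```python
# Minimal WebHDFS two-step create, illustrative only.
import requests

def webhdfs_create(namenode_host, hdfs_path, local_path, run_user='hdfs'):
    # Step 1: ask the NameNode where to write; it answers with a 307
    # redirect whose Location header points at a DataNode.
    nn_url = 'http://%s:50070/webhdfs/v1%s' % (namenode_host, hdfs_path)
    r = requests.put(nn_url,
                     params={'op': 'CREATE', 'user.name': run_user,
                             'overwrite': 'true'},
                     allow_redirects=False)
    r.raise_for_status()
    datanode_url = r.headers['Location']

    # Step 2: stream the file body to the DataNode; 201 means created.
    with open(local_path, 'rb') as payload:
        requests.put(datanode_url, data=payload).raise_for_status()

webhdfs_create('c6401.ambari.apache.org', '/tmp/passwd', '/etc/passwd')
```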

+ 15 - 6
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -291,7 +291,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = False,
@@ -303,7 +303,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -313,7 +313,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -417,7 +417,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = True,
@@ -429,7 +429,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -439,7 +439,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -543,7 +543,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -554,7 +557,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -566,7 +572,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
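
Every `HdfsResource` assertion above now carries three extra arguments: `hdfs_site` (the rendered hdfs-site dictionary), `default_fs` (fs.defaultFS), and `principal_name`. The provider needs the first two to locate the NameNode(s) for WebHDFS calls and the last for secured clusters; `UnknownConfigurationMock()` stands in wherever the test fixture has no such config key. A hedged sketch of how a service `params.py` can pre-bind these once instead of repeating them at each call site (values mirror the fixtures, the exact wiring is illustrative):

```python
# Pre-binding the new HdfsResource arguments once per service script;
# a sketch, not this commit's literal params.py code.
import functools
from resource_management.libraries.resources.hdfs_resource import HdfsResource

config = {'configurations': {'hdfs-site': {}}}  # Script.get_config() in a real params.py

HdfsResource = functools.partial(
    HdfsResource,
    user='hdfs',
    security_enabled=False,
    keytab=None,
    kinit_path_local='/usr/bin/kinit',
    hadoop_bin_dir='/usr/bin',
    hadoop_conf_dir='/etc/hadoop/conf',
    principal_name=None,
    hdfs_site=config['configurations']['hdfs-site'],
    default_fs='hdfs://c6401.ambari.apache.org:8020',
)

# Call sites then stay as short as before:
HdfsResource('/apps/hbase/data',
             type='directory',
             action=['create_on_execute'],
             owner='hbase',
)
```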

+ 83 - 16
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -102,9 +102,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -114,9 +118,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -129,13 +137,15 @@ class TestNamenode(RMFTestCase):
         only_if = None,
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
-    pass
 
   def test_install_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
@@ -209,9 +219,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -221,9 +235,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -236,7 +254,10 @@ class TestNamenode(RMFTestCase):
         only_if = None,
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -345,8 +366,9 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
+        only_if = None,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = True,
@@ -357,8 +379,9 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0770,
+        only_if = None,
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = True,
@@ -367,7 +390,7 @@ class TestNamenode(RMFTestCase):
         hadoop_bin_dir = '/usr/bin',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -437,9 +460,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -449,9 +476,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -464,7 +495,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -518,9 +552,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = True,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -530,9 +568,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = True,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -545,7 +587,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -608,9 +653,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -620,9 +669,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -635,7 +688,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -695,9 +751,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -707,9 +767,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -722,7 +786,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
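
In the HA variants the bootstrap calls gain an `only_if` guard (`hdfs haadmin -getServiceState nn1 | grep active`, or `nn2` on the second host), so the directory setup runs only on the host whose NameNode is currently active. The commit's new `jmx.py`/`namenode_ha_utils.py` helpers resolve the active NameNode in a similar way over HTTP; a minimal sketch, assuming the `NameNodeStatus` JMX bean exposes a `State` attribute on the default port:

```python
# Find the active NameNode over JMX; illustrative of the new helpers,
# not their literal code.
import json
import urllib2

def get_active_namenode(namenode_hosts, http_port=50070):
    qry = 'Hadoop:service=NameNode,name=NameNodeStatus'
    for host in namenode_hosts:
        url = 'http://%s:%d/jmx?qry=%s' % (host, http_port, qry)
        try:
            beans = json.load(urllib2.urlopen(url, timeout=10))['beans']
        except Exception:
            continue  # host unreachable; try the next one
        if beans and beans[0].get('State') == 'active':
            return host
    return None

print(get_active_namenode(['c6401.ambari.apache.org', 'c6402.ambari.apache.org']))
```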

+ 13 - 1
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py

@@ -63,7 +63,10 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['create_on_execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -74,7 +77,10 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['delete_on_execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -84,8 +90,11 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['create_on_execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -95,7 +104,10 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
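
A pattern worth noting in this service check: the intermediate resources use `create_on_execute`/`delete_on_execute`, and nothing is expected to touch HDFS until the trailing `HdfsResource(None, action=['execute'])`. A toy model of that deferred-action contract, purely illustrative of the control flow the assertions encode:

```python
# Toy model of the *_on_execute / execute contract; the real provider
# performs the queued work over WebHDFS when execute fires.
class DeferredHdfsActions(object):
    def __init__(self):
        self.pending = []

    def create_on_execute(self, path, **kwargs):
        self.pending.append(('create', path, kwargs))

    def delete_on_execute(self, path, **kwargs):
        self.pending.append(('delete', path, kwargs))

    def execute(self):
        # One pass over the queue; each entry would be a WebHDFS round-trip.
        for op, path, kwargs in self.pending:
            print('%s %s %r' % (op, path, kwargs))
        self.pending = []

actions = DeferredHdfsActions()
actions.create_on_execute('/tmp/ambari-qa', type='directory', mode=0777)
actions.delete_on_execute('/tmp/ambari-qa/passwd', type='file')
actions.execute()
```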

+ 14 - 14
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -280,7 +280,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
@@ -292,7 +292,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
 
@@ -305,7 +305,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -317,7 +317,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0700,
     )
     if not no_tmp:
@@ -331,7 +331,7 @@ class TestHiveServer(RMFTestCase):
           group = 'hdfs',
           hadoop_bin_dir = '/usr/bin',
           type = 'directory',
-          action = ['create_on_execute'],
+          action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
           mode = 0777,
       )
     self.assertResourceCalled('HdfsResource', None,
@@ -340,7 +340,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -457,7 +457,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/user/hcat',
@@ -469,7 +469,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
 
@@ -482,7 +482,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/user/hive',
@@ -494,7 +494,7 @@ class TestHiveServer(RMFTestCase):
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0700,
     )
     self.assertResourceCalled('HdfsResource', '/custompath/tmp/hive',
@@ -507,7 +507,7 @@ class TestHiveServer(RMFTestCase):
         group = 'hdfs',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -516,7 +516,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -818,7 +818,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertNoMoreResources()
@@ -856,7 +856,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertNoMoreResources()

+ 24 - 6
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py

@@ -91,8 +91,11 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
         source = '/tmp/idtest.ambari-qa.1431110511.43.pig',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'missing_principal',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -103,8 +106,11 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'missing_principal',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -115,7 +121,10 @@ class TestServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'missing_principal',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -189,34 +198,43 @@ class TestServiceCheck(RMFTestCase):
         owner = "hdfs"
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.pig',
-        action = ['create_on_execute'],
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-        kinit_path_local = '/usr/bin/kinit',
         source = '/tmp/idtest.ambari-qa.1431110511.43.pig',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', '/tmp/idtest.ambari-qa.1431110511.43.in',
-        action = ['create_on_execute'],
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
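
On the secured variant the fixtures supply `principal_name = 'hdfs'` with the headless keytab ('missing_principal' appears where kerberos-env provides none). A kerberized WebHDFS round-trip then amounts to a kinit followed by a SPNEGO-authenticated HTTP call; a hedged sketch using curl's `--negotiate` (paths and principal mirror the fixtures, the provider's actual plumbing may differ):

```python
# Kerberized WebHDFS mkdir: kinit with the headless keytab, then let
# curl do SPNEGO against the NameNode. Illustrative only.
import subprocess

def secured_webhdfs_mkdir(namenode_host, hdfs_path,
                          keytab='/etc/security/keytabs/hdfs.headless.keytab',
                          principal='hdfs',
                          kinit='/usr/bin/kinit'):
    # Obtain a ticket for the hdfs principal.
    subprocess.check_call([kinit, '-kt', keytab, principal])
    url = 'http://%s:50070/webhdfs/v1%s?op=MKDIRS' % (namenode_host, hdfs_path)
    # --negotiate with an empty user triggers SPNEGO from the ticket cache.
    subprocess.check_call(['curl', '-sS', '--negotiate', '-u', ':',
                           '-X', 'PUT', url])

secured_webhdfs_mkdir('c6401.ambari.apache.org', '/user/ambari-qa')
```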

+ 30 - 12
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py

@@ -60,7 +60,7 @@ class TestOozieServer(RMFTestCase):
         owner = 'oozie',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -69,7 +69,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
@@ -349,7 +349,7 @@ class TestOozieServer(RMFTestCase):
         owner = 'oozie',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -358,7 +358,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
@@ -516,7 +516,7 @@ class TestOozieServer(RMFTestCase):
         owner = 'oozie',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0775,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -526,7 +526,7 @@ class TestOozieServer(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
@@ -869,14 +869,17 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'hdp-select set oozie-server 2.2.1.0-2135',)
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
         security_enabled = False,
-        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         recursive_chmod = True,
         owner = 'oozie',
         group = 'hadoop',
-        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
         action = ['create_on_execute'],
         mode = 0755,
@@ -885,7 +888,10 @@ class TestOozieServer(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -950,14 +956,17 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'hdp-select set oozie-server 2.3.0.0-1234')
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
         security_enabled = False,
-        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         recursive_chmod = True,
         owner = 'oozie',
         group = 'hadoop',
-        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
         action = ['create_on_execute'],
         mode = 0755,
@@ -966,7 +975,10 @@ class TestOozieServer(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -1026,14 +1038,17 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('Execute', 'hdp-select set oozie-server 2.2.0.0-0000')
     self.assertResourceCalled('HdfsResource', '/user/oozie/share',
         security_enabled = False,
-        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        hadoop_bin_dir = '/usr/hdp/2.2.0.0-0000/hadoop/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         recursive_chmod = True,
         owner = 'oozie',
         group = 'hadoop',
-        hadoop_bin_dir = '/usr/hdp/2.2.0.0-0000/hadoop/bin',
+        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
         action = ['create_on_execute'],
         mode = 0755,
@@ -1042,7 +1057,10 @@ class TestOozieServer(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/2.2.0.0-0000/hadoop/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',

+ 5 - 5
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py

@@ -71,7 +71,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
     )
@@ -82,7 +82,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '//examples',
         user = 'hdfs',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         owner = 'ambari-qa',
@@ -94,7 +94,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
     )
@@ -105,7 +105,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '//examples/input-data',
         user = 'hdfs',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
         owner = 'ambari-qa',
@@ -117,7 +117,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Execute', '/tmp/oozieSmoke2.sh suse /var/lib/oozie /etc/oozie/conf /usr/bin / /etc/hadoop/conf /usr/bin ambari-qa False',

+ 32 - 10
ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py

@@ -36,28 +36,39 @@ class TestPigServiceCheck(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
-        user = 'ambari-qa',
-        action = ['delete_on_execute'],
+        principal_name = UnknownConfigurationMock(),
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
+        action = ['delete_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
-        user = 'ambari-qa',
-        action = ['create_on_execute'],
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = False,
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -96,28 +107,39 @@ class TestPigServiceCheck(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
-        user = 'ambari-qa',
-        action = ['delete_on_execute'],
+        principal_name = 'hdfs',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
+        action = ['delete_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
-        user = 'ambari-qa',
-        action = ['create_on_execute'],
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = True,
         hadoop_bin_dir = '/usr/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
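
The Pig check also flips who performs the calls: previously each resource ran as `user = 'ambari-qa'`, now everything runs as `user = 'hdfs'` with `owner = 'ambari-qa'`, i.e. the superuser executes the operation and ownership is assigned explicitly. Over WebHDFS that split maps naturally onto two round-trips; a sketch, illustrative only:

```python
# Create as the superuser, then hand ownership to the service user.
import requests

def create_dir_owned_by(namenode_host, hdfs_path, owner, run_user='hdfs'):
    url = 'http://%s:50070/webhdfs/v1%s' % (namenode_host, hdfs_path)
    # Create as the superuser...
    requests.put(url, params={'op': 'MKDIRS',
                              'user.name': run_user}).raise_for_status()
    # ...then chown to the intended owner (a superuser-only operation).
    requests.put(url, params={'op': 'SETOWNER', 'user.name': run_user,
                              'owner': owner}).raise_for_status()

create_dir_owned_by('c6401.ambari.apache.org', '/user/ambari-qa/pigsmoke.out', 'ambari-qa')
```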

+ 11 - 11
ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py

@@ -154,7 +154,7 @@ class TestHistoryServer(RMFTestCase):
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/mapred',
@@ -166,7 +166,7 @@ class TestHistoryServer(RMFTestCase):
         owner = 'mapred',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mapred/system',
         security_enabled = False,
@@ -177,7 +177,7 @@ class TestHistoryServer(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mr-history/done',
         security_enabled = False,
@@ -190,7 +190,7 @@ class TestHistoryServer(RMFTestCase):
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -199,7 +199,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -363,7 +363,7 @@ class TestHistoryServer(RMFTestCase):
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', '/mapred',
@@ -375,7 +375,7 @@ class TestHistoryServer(RMFTestCase):
         owner = 'mapred',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mapred/system',
         security_enabled = True,
@@ -386,7 +386,7 @@ class TestHistoryServer(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/mr-history/done',
         security_enabled = True,
@@ -399,7 +399,7 @@ class TestHistoryServer(RMFTestCase):
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -408,7 +408,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -701,7 +701,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
 

+ 6 - 6
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py

@@ -45,7 +45,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
     )
@@ -56,7 +56,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
         user = 'hdfs',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
     )
@@ -66,7 +66,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar wordcount /user/ambari-qa/mapredsmokeinput /user/ambari-qa/mapredsmokeoutput',
@@ -99,7 +99,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
     )
@@ -110,7 +110,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
         user = 'hdfs',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
     )
@@ -120,7 +120,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM;',

+ 3 - 3
ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py

@@ -68,7 +68,7 @@ class TestResourceManager(RMFTestCase):
         owner = 'tez',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', '/apps/tez/lib/',
@@ -80,7 +80,7 @@ class TestResourceManager(RMFTestCase):
         owner = 'tez',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0755,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -89,7 +89,7 @@ class TestResourceManager(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',

+ 3 - 3
ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py

@@ -131,7 +131,7 @@ class TestFalconServer(RMFTestCase):
         owner = 'falcon',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
     )
     self.assertResourceCalled('Directory', '/hadoop/falcon/store',
@@ -150,7 +150,7 @@ class TestFalconServer(RMFTestCase):
         type = 'directory',
         recursive_chown = True,
         recursive_chmod = True,
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0770,
         source='/usr/hdp/current/falcon-server/data-mirroring'
     )
@@ -161,7 +161,7 @@ class TestFalconServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('Directory', '/hadoop/falcon',

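The recursive_chown and recursive_chmod flags in the Falcon hunk map naturally onto per-entry WebHDFS calls. A rough sketch, using the public WebHDFS operations LISTSTATUS and SETOWNER; the walk is simplified (no error handling, assumes the starting path is a directory) and is not the provider's actual implementation:

    import json
    import urllib2

    def webhdfs_put(nn, path, op, **params):
        # Builds e.g. .../webhdfs/v1/path?op=SETOWNER&owner=falcon&user.name=hdfs
        query = "&".join(["op=" + op] + ["%s=%s" % kv for kv in sorted(params.items())])
        request = urllib2.Request("http://%s/webhdfs/v1%s?%s" % (nn, path, query))
        request.get_method = lambda: "PUT"
        return urllib2.urlopen(request).read()

    def chown_recursive(nn, path, owner, run_user):
        # SETOWNER the entry itself, then descend into directory children.
        webhdfs_put(nn, path, "SETOWNER", **{"owner": owner, "user.name": run_user})
        listing = json.loads(urllib2.urlopen(
            "http://%s/webhdfs/v1%s?op=LISTSTATUS&user.name=%s"
            % (nn, path, run_user)).read())
        for status in listing["FileStatuses"]["FileStatus"]:
            child = path.rstrip("/") + "/" + status["pathSuffix"]
            if status["type"] == "DIRECTORY":
                chown_recursive(nn, child, owner, run_user)
            else:
                webhdfs_put(nn, child, "SETOWNER", **{"owner": owner, "user.name": run_user})
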
+ 3 - 3
ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py

@@ -46,7 +46,7 @@ class TestTezServiceCheck(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/tmp/tezsmokeinput/sample-tez-test',
         security_enabled = False,
@@ -58,7 +58,7 @@ class TestTezServiceCheck(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = False,
@@ -66,7 +66,7 @@ class TestTezServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/tez/tez-mapreduce-examples*.jar orderedwordcount /tmp/tezsmokeinput/sample-tez-test /tmp/tezsmokeoutput/',

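Worth spelling out, since every test block above ends the same way: the *_on_execute actions only queue work, and the trailing HdfsResource(None, action=['execute']) assertion is the flush that replays the queue in a single pass. A toy model of that contract; the names and structure below are illustrative, not Ambari's implementation:

    # Toy queue-and-flush model of the HdfsResource action contract.
    pending = []

    def hdfs_resource(name, action, **kwargs):
        if action == ['execute']:
            # Replay everything queued so far, then clear the queue.
            for queued_name, queued_action, _ in pending:
                print "applying %s to %s" % (queued_action[0], queued_name)
            del pending[:]
        else:
            pending.append((name, action, kwargs))

    hdfs_resource('/tmp/tezsmokeinput', ['create_on_execute'], type='directory')
    hdfs_resource('/tmp/tezsmokeinput/sample-tez-test', ['create_on_execute'], type='file')
    hdfs_resource(None, ['execute'])  # both queued entries are applied here
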
+ 32 - 10
ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py

@@ -42,28 +42,39 @@ class TestPigServiceCheck(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
-        user = 'ambari-qa',
-        action = ['delete_on_execute'],
+        principal_name = 'hdfs@EXAMPLE.COM',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
+        action = ['delete_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
-        user = 'ambari-qa',
-        action = ['create_on_execute'],
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs@EXAMPLE.COM',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -88,22 +99,30 @@ class TestPigServiceCheck(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
-        user = 'ambari-qa',
-        action = ['delete_on_execute'],
+        principal_name = 'hdfs@EXAMPLE.COM',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
+        action = ['delete_on_execute'],
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/passwd',
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
-        kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
-        user = 'ambari-qa',
-        action = ['create_on_execute'],
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+        kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs@EXAMPLE.COM',
+        user = 'hdfs',
+        owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
+        action = ['create_on_execute'],
     )
 
     copy_to_hdfs_mock.assert_called_with("tez", "hadoop", "hdfs")
@@ -111,7 +130,10 @@ class TestPigServiceCheck(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',

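In the kerberized variant above, note that user flips to 'hdfs' while owner stays 'ambari-qa': the agent authenticates as the HDFS superuser and sets ownership on behalf of the smoke user. A hedged sketch of the authenticate-then-call ordering; the actual request on a secured cluster also needs a SPNEGO-capable HTTP client, which is omitted here:

    import subprocess

    def run_as_hdfs_superuser(kinit_path, keytab, principal, hdfs_call, *args):
        # Obtain a Kerberos ticket first; the WebHDFS/SPNEGO request that
        # follows then runs with the superuser's credentials.
        subprocess.check_call([kinit_path, '-kt', keytab, principal])
        return hdfs_call(*args)

    # e.g. run_as_hdfs_superuser('/usr/bin/kinit',
    #          '/etc/security/keytabs/hdfs.headless.keytab', 'hdfs@EXAMPLE.COM',
    #          some_webhdfs_call)
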
+ 13 - 1
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py

@@ -133,7 +133,10 @@ class TestJobHistoryServer(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         owner = 'spark',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -145,7 +148,10 @@ class TestJobHistoryServer(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = {u'a': u'b'},
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -190,7 +196,10 @@ class TestJobHistoryServer(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = UnknownConfigurationMock(),
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = UnknownConfigurationMock(),
         owner = 'spark',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -202,7 +211,10 @@ class TestJobHistoryServer(RMFTestCase):
         security_enabled = True,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = UnknownConfigurationMock(),
+        hdfs_site = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = UnknownConfigurationMock(),
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -260,7 +272,7 @@ class TestJobHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertNoMoreResources()

+ 4 - 4
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py

@@ -43,7 +43,7 @@ class TestMahoutClient(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['delete_on_execute'],
+        action = ['delete_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
     )
@@ -56,7 +56,7 @@ class TestMahoutClient(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt',
         security_enabled = False,
@@ -68,7 +68,7 @@ class TestMahoutClient(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = False,
@@ -76,7 +76,7 @@ class TestMahoutClient(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
     self.assertResourceCalled('Execute', 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/'

+ 1 - 1
ambari-server/src/test/python/stacks/utils/RMFTestCase.py

@@ -174,7 +174,7 @@ class RMFTestCase(TestCase):
   
   def _ppformat(self, val):
     if isinstance(val, dict) and len(val) > MAX_SHOWN_DICT_LEN:
-      return "self.getConfig()['configurations']['?']"
+      return "self.getConfig()['configurations']['hdfs-site']"
     
     val = pprint.pformat(val)
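
The practical effect of this last change: any oversized dict in a generated assertion is rendered as the hdfs-site lookup expression, which is exactly what the hand-written assertions above compare against, instead of a '?' placeholder that never matches a real config key. A toy illustration; the MAX_SHOWN_DICT_LEN value here is an assumption for demonstration, the real constant lives elsewhere in RMFTestCase:

    import pprint

    MAX_SHOWN_DICT_LEN = 10  # assumed value for illustration

    def ppformat(val):
        # Oversized dicts in these tests are invariably the hdfs-site config,
        # so the generator emits the exact expression the assertions expect.
        if isinstance(val, dict) and len(val) > MAX_SHOWN_DICT_LEN:
            return "self.getConfig()['configurations']['hdfs-site']"
        return pprint.pformat(val)

    print ppformat({'dfs.webhdfs.enabled': 'true'})            # small dict: literal
    print ppformat(dict(('key%d' % i, i) for i in range(20)))  # big dict: lookup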