
AMBARI-15487. Add support for ECS stack (Vijay Srinivasaraghavan via smohanty)

Sumit Mohanty, 9 years ago
commit 4843e25949
100 changed files with 2859 additions and 62 deletions
  1. +9 -5  ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
  2. +3 -0  ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
  3. +5 -1  ambari-server/sbin/ambari-server
  4. +12 -0  ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
  5. +0 -2  ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
  6. +23 -0  ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
  7. +11 -2  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
  8. +33 -1  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
  9. +24 -2  ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  10. +11 -1  ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceResponse.java
  11. +1 -4  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
  12. +6 -0  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceResourceProvider.java
  13. +12 -1  ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
  14. +4 -3  ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
  15. +25 -2  ambari-server/src/main/python/ambari-server.py
  16. +94 -0  ambari-server/src/main/python/ambari_server/enableStack.py
  17. +2 -1  ambari-server/src/main/python/ambari_server/setupActions.py
  18. +5 -1  ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
  19. +4 -2  ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase.py
  20. +3 -0  ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params_linux.py
  21. +4 -1  ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
  22. +5 -1  ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
  23. +4 -1  ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
  24. +4 -1  ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
  25. +4 -1  ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
  26. +5 -1  ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
  27. +4 -1  ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
  28. +3 -1  ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
  29. +4 -1  ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
  30. +2 -0  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py
  31. +5 -2  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
  32. +9 -8  ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
  33. +4 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  34. +1 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
  35. +1 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
  36. +3 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
  37. +2 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
  38. +40 -1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  39. +27 -5  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
  40. +23 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/metainfo.xml
  41. +122 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml
  42. +10 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json
  43. +28 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ACCUMULO/metainfo.xml
  44. +28 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ATLAS/metainfo.xml
  45. +129 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
  46. +130 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml
  47. +40 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml
  48. +53 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json
  49. +84 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml
  50. +112 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
  51. +82 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
  52. +47 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py
  53. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/FALCON/metainfo.xml
  54. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/FLUME/metainfo.xml
  55. +107 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml
  56. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml
  57. +132 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json
  58. +58 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/metainfo.xml
  59. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HDFS/metainfo.xml
  60. +91 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HIVE/metainfo.xml
  61. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KAFKA/metainfo.xml
  62. +26 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KERBEROS/metainfo.xml
  63. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KNOX/metainfo.xml
  64. +28 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/MAHOUT/metainfo.xml
  65. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/OOZIE/metainfo.xml
  66. +32 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/RANGER/metainfo.xml
  67. +30 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/RANGER_KMS/metainfo.xml
  68. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SLIDER/metainfo.xml
  69. +30 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SPARK/metainfo.xml
  70. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SQOOP/metainfo.xml
  71. +28 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/STORM/metainfo.xml
  72. +27 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml
  73. +59 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/metainfo.xml
  74. +34 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml
  75. +29 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml
  76. +215 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
  77. +145 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/metainfo.xml
  78. +51 -0  ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ZOOKEEPER/metainfo.xml
  79. +17 -1  ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  80. +13 -2  ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
  81. +8 -0  ambari-server/src/test/java/org/apache/ambari/server/state/ServiceInfoTest.java
  82. +2 -0  ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
  83. +9 -0  ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
  84. +27 -0  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
  85. +4 -0  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
  86. +14 -0  ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
  87. +6 -0  ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
  88. +16 -0  ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
  89. +5 -0  ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
  90. +6 -0  ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
  91. +17 -1  ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
  92. +6 -0  ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
  93. +6 -0  ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
  94. +8 -0  ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
  95. +1 -0  ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
  96. +6 -0  ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
  97. +6 -0  ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
  98. +4 -0  ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
  99. +2 -0  ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
  100. +5 -0  ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py

+ 9 - 5
ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py

@@ -52,8 +52,8 @@ RESOURCE_TO_JSON_FIELDS = {
   'recursive_chown': 'recursiveChown',
   'recursive_chmod': 'recursiveChmod',
   'change_permissions_for_parents': 'changePermissionforParents',
-  'manage_if_exists': 'manageIfExists'
-
+  'manage_if_exists': 'manageIfExists',
+  'dfs_type': 'dfs_type'
 }
 
 class HdfsResourceJar:
@@ -404,9 +404,11 @@ class HdfsResourceWebHDFS:
 class HdfsResourceProvider(Provider):
   def __init__(self, resource):
     super(HdfsResourceProvider,self).__init__(resource)
-    self.assert_parameter_is_set('hdfs_site')
     self.ignored_resources_list = HdfsResourceProvider.get_ignored_resources_list(self.resource.hdfs_resource_ignore_file)
-    self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
+    self.fsType = getattr(resource, 'dfs_type')
+    if self.fsType != 'HCFS':
+      self.assert_parameter_is_set('hdfs_site')
+      self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
           
   @staticmethod
   def parse_path(path):
@@ -467,7 +469,9 @@ class HdfsResourceProvider(Provider):
     self.get_hdfs_resource_executor().action_execute(self)
 
   def get_hdfs_resource_executor(self):
-    if WebHDFSUtil.is_webhdfs_available(self.webhdfs_enabled, self.resource.default_fs):
+    if self.fsType == 'HCFS':
+      return HdfsResourceJar()
+    elif WebHDFSUtil.is_webhdfs_available(self.webhdfs_enabled, self.resource.default_fs):
       return HdfsResourceWebHDFS()
     else:
       return HdfsResourceJar()
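
The executor is now chosen before any hdfs-site lookup: on an HCFS filesystem (such as ECS) the provider skips the WebHDFS probe entirely and always uses the JAR-based executor. A minimal sketch of the selection order (pick_executor is a hypothetical free-standing form of get_hdfs_resource_executor; the WebHDFS probe is simplified from WebHDFSUtil.is_webhdfs_available):

    def pick_executor(fs_type, webhdfs_enabled, default_fs):
      if fs_type == 'HCFS':
        return HdfsResourceJar()        # HCFS (e.g. ECS) exposes no WebHDFS endpoint
      elif webhdfs_enabled and default_fs.startswith("hdfs"):
        return HdfsResourceWebHDFS()    # fast path on plain HDFS
      else:
        return HdfsResourceJar()        # JAR fallback when WebHDFS is disabled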

+ 3 - 0
ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py

@@ -99,6 +99,9 @@ class HdfsResource(Resource):
   hdfs_site = ResourceArgument()
   default_fs = ResourceArgument()
 
+  # To support HCFS
+  dfs_type = ResourceArgument(default="")
+
   #action 'execute' immediately creates all pending files/directories in efficient manner
   #action 'create_delayed/delete_delayed' adds file/directory to list of pending directories
   actions = Resource.actions + ["create_on_execute", "delete_on_execute", "execute"]
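
The new argument defaults to an empty string, so existing callers are unaffected. A hypothetical call site passing it explicitly (the params.py changes below mostly bind it once via functools.partial instead):

    params.HdfsResource("/mapred/history",           # hypothetical path
                        type="directory",
                        action="create_on_execute",
                        owner="mapred",              # hypothetical owner
                        dfs_type=params.dfs_type)    # "" on HDFS, "HCFS" on ECS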

+ 5 - 1
ambari-server/sbin/ambari-server

@@ -146,9 +146,13 @@ case "$1" in
         echo -e "Cleanup database..."
         $PYTHON /usr/sbin/ambari-server.py $@
         ;;
+  enable-stack)
+        echo -e "Enabling stack(s)..."
+        $PYTHON /usr/sbin/ambari-server.py $@
+        ;;
   *)
         echo "Usage: $AMBARI_PYTHON_EXECUTABLE
-        {start|stop|restart|setup|setup-jce|upgrade|status|upgradestack|setup-ldap|sync-ldap|set-current|setup-security|setup-sso|refresh-stack-hash|backup|restore|update-host-names|check-database|db-cleanup} [options]
+        {start|stop|restart|setup|setup-jce|upgrade|status|upgradestack|setup-ldap|sync-ldap|set-current|setup-security|setup-sso|refresh-stack-hash|backup|restore|update-host-names|check-database|db-cleanup|enable-stack} [options]
         Use $AMBARI_PYTHON_EXECUTABLE <action> --help to get details on options available.
         Or, simply invoke ambari-server.py --help to print the options."
         exit 1

+ 12 - 0
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java

@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
@@ -952,6 +953,17 @@ class ActionScheduler implements Runnable {
     commandParamsCmd.putAll(commandParams);
     cmd.setCommandParams(commandParamsCmd);
 
+    try {
+      Cluster cluster = clusters.getCluster(s.getClusterName());
+      if (null != cluster) {
+        // Generate localComponents
+        for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostname)) {
+          cmd.getLocalComponents().add(sch.getServiceComponentName());
+        }
+      }
+    } catch (ClusterNotFoundException cnfe) {
+      //NOP
+    }
 
     //Try to get hostParams from cache and merge them with command-level parameters
     Map<String, String> hostParams = hostParamsStageCache.getIfPresent(stagePk);
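
The same localComponents stamping is repeated in AmbariActionExecutionHelper and AmbariManagementControllerImpl further down, so every command a host receives carries the list of components installed on that host. Agent-side code reads it back with the default() helper, exactly as the before-START params.py change below does:

    component_list = default("/localComponents", [])  # e.g. ["ECS_CLIENT", ...]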

+ 0 - 2
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java

@@ -19,7 +19,6 @@ package org.apache.ambari.server.actionmanager;
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 
@@ -31,7 +30,6 @@ import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.utils.StageUtils;
 
 import com.google.inject.Inject;

+ 23 - 0
ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java

@@ -99,12 +99,18 @@ public class ExecutionCommand extends AgentCommand {
   @SerializedName("serviceName")
   private String serviceName;
 
+  @SerializedName("serviceType")
+  private String serviceType;  
+  
   @SerializedName("componentName")
   private String componentName;
 
   @SerializedName("kerberosCommandParams")
   private List<Map<String, String>> kerberosCommandParams = new ArrayList<Map<String, String>>();
 
+  @SerializedName("localComponents")
+  private Set<String> localComponents = new HashSet<String>();
+
   public String getCommandId() {
     return commandId;
   }
@@ -247,6 +253,14 @@ public class ExecutionCommand extends AgentCommand {
     this.forceRefreshConfigTagsBeforeExecution = forceRefreshConfigTagsBeforeExecution;
   }
 
+  public Set<String> getLocalComponents() {
+    return localComponents;
+  }
+
+  public void setLocalComponents(Set<String> localComponents) {
+    this.localComponents = localComponents;
+  }
+
   public Map<String, Map<String, Map<String, String>>> getConfigurationAttributes() {
     return configurationAttributes;
   }
@@ -270,6 +284,14 @@ public class ExecutionCommand extends AgentCommand {
   public void setServiceName(String serviceName) {
     this.serviceName = serviceName;
   }
+
+  public String getServiceType() {
+    return serviceType;
+  }
+
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
+  }
 
   public String getComponentName() {
     return componentName;
@@ -320,6 +342,7 @@ public class ExecutionCommand extends AgentCommand {
     String SERVICE_PACKAGE_FOLDER = "service_package_folder";
     String HOOKS_FOLDER = "hooks_folder";
     String STACK_NAME = "stack_name";
+    String SERVICE_TYPE = "service_type";
     String STACK_VERSION = "stack_version";
     String SERVICE_REPO_INFO = "service_repo_info";
     String PACKAGE_LIST = "package_list";
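
Both new fields ride along in the agent command JSON under the names given by @SerializedName. Roughly, a command for an ECS client host would carry (a sketch, values hypothetical):

    command = {
      "commandId": "5-1",                        # hypothetical
      "serviceName": "ECS",
      "serviceType": "HCFS",                     # new field
      "componentName": "ECS_CLIENT",
      "localComponents": ["ECS_CLIENT",          # new field: all components
                          "ZOOKEEPER_SERVER"],   # on this host (hypothetical)
      "commandParams": {"dfs_type": "HCFS"}
    }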

+ 11 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java

@@ -30,6 +30,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAM
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -453,8 +454,16 @@ public class AmbariActionExecutionHelper {
 
           execCmd.setForceRefreshConfigTagsBeforeExecution(configsToRefresh);
         }
-      }
-    }
+      }
+
+      if (null != cluster) {
+        // Generate localComponents
+        for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostName)) {
+          execCmd.getLocalComponents().add(sch.getServiceComponentName());
+        }
+      }
+    }
   }
 
   /*

+ 33 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java

@@ -54,6 +54,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Iterator;
 import java.util.Set;
 import java.util.TreeMap;
 
@@ -596,7 +597,25 @@ public class AmbariCustomCommandExecutionHelper {
     execCmd.setClusterHostInfo(
         StageUtils.getClusterHostInfo(cluster));
 
+    // Generate localComponents
+    for (ServiceComponentHost sch : cluster.getServiceComponentHosts(hostname)) {
+      execCmd.getLocalComponents().add(sch.getServiceComponentName());
+    }
+
     Map<String, String> commandParams = new TreeMap<String, String>();
+
+    //Propagate HCFS service type info
+    Iterator<Service> it = cluster.getServices().values().iterator();
+    while(it.hasNext()) {
+        ServiceInfo serviceInfoInstance = ambariMetaInfo.getService(stackId.getStackName(),stackId.getStackVersion(), it.next().getName());
+        LOG.info("Iterating service type Instance in addServiceCheckAction:: " + serviceInfoInstance.getName());
+        if(serviceInfoInstance.getServiceType() != null) {
+            LOG.info("Adding service type info in addServiceCheckAction:: " + serviceInfoInstance.getServiceType());
+            commandParams.put("dfs_type",serviceInfoInstance.getServiceType());
+            break;
+        }
+    }
+
     String commandTimeout = configs.getDefaultAgentTaskTimeout(false);
 
 
@@ -922,7 +941,7 @@ public class AmbariCustomCommandExecutionHelper {
    *
    * @param actionExecutionContext  received request to execute a command
    * @param stage                   the initial stage for task creation
-   * @param retryAllowed            indicates whether the the command allows retry
+   * @param requestParams           the request params
    *
    * @throws AmbariException if the commands can not be added
    */
@@ -1106,6 +1125,19 @@ public class AmbariCustomCommandExecutionHelper {
         hostParamsStage.put(CLIENTS_TO_UPDATE_CONFIGS, clientsToUpdateConfigs);
       }
       clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
+
+      //Propagate HCFS service type info to command params
+      Iterator<Service> it = cluster.getServices().values().iterator();
+      while(it.hasNext()) {
+          ServiceInfo serviceInfoInstance = ambariMetaInfo.getService(stackId.getStackName(),stackId.getStackVersion(), it.next().getName());
+          LOG.info("Iterating service type Instance in getCommandJson:: " + serviceInfoInstance.getName());
+          if(serviceInfoInstance.getServiceType() != null) {
+              LOG.info("Adding service type info in getCommandJson:: " + serviceInfoInstance.getServiceType());
+              commandParamsStage.put("dfs_type",serviceInfoInstance.getServiceType());
+              break;
+          }
+      }      
+
     }
 
     String hostParamsStageJson = StageUtils.getGson().toJson(hostParamsStage);

+ 24 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -151,6 +151,7 @@ import java.util.Collections;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -1930,17 +1931,31 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     Host host = clusters.getHost(scHost.getHostName());
 
+    LOG.info("Adding service type info in createHostAction:: " + serviceInfo.getServiceType());
+    execCmd.setServiceType(serviceInfo.getServiceType());
+    
     execCmd.setConfigurations(configurations);
     execCmd.setConfigurationAttributes(configurationAttributes);
     execCmd.setConfigurationTags(configTags);
 
-
-
     // Create a local copy for each command
     Map<String, String> commandParams = new TreeMap<String, String>();
     if (commandParamsInp != null) { // if not defined
       commandParams.putAll(commandParamsInp);
     }
+
+    //Propagate HCFS service type info
+    Iterator<Service> it = cluster.getServices().values().iterator();
+    while(it.hasNext()) {
+       ServiceInfo serviceInfoInstance = ambariMetaInfo.getService(stackId.getStackName(),stackId.getStackVersion(), it.next().getName());
+       LOG.info("Iterating service type Instance in createHostAction:: " + serviceInfoInstance.getName());
+       if(serviceInfoInstance.getServiceType() != null) {
+           LOG.info("Adding service type info in createHostAction:: " + serviceInfoInstance.getServiceType());
+            commandParams.put("dfs_type",serviceInfoInstance.getServiceType());
+           break;
+       }
+    }
+
     boolean isInstallCommand = roleCommand.equals(RoleCommand.INSTALL);
     String agentDefaultCommandTimeout = configs.getDefaultAgentTaskTimeout(isInstallCommand);
     String scriptCommandTimeout = "";
@@ -2565,6 +2580,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     ec.setClusterHostInfo(
         StageUtils.getClusterHostInfo(cluster));
 
+    if (null != cluster) {
+      // Generate localComponents
+      for (ServiceComponentHost sch : cluster.getServiceComponentHosts(scHost.getHostName())) {
+        ec.getLocalComponents().add(sch.getServiceComponentName());
+      }
+    }
+
     // Hack - Remove passwords from configs
     if ((ec.getRole().equals(Role.HIVE_CLIENT.toString()) ||
             ec.getRole().equals(Role.WEBHCAT_SERVER.toString()) ||

+ 11 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceResponse.java

@@ -33,6 +33,7 @@ public class StackServiceResponse {
   private String stackName;
   private String stackVersion;
   private String serviceName;
+  private String serviceType;
   private String serviceDisplayName;
   private String userName;
   private String comments;
@@ -61,6 +62,7 @@ public class StackServiceResponse {
    */
   public StackServiceResponse(ServiceInfo service) {
     serviceName = service.getName();
+    serviceType = service.getServiceType();
     serviceDisplayName = service.getDisplayName();
     userName = null;
     comments = service.getComment();
@@ -107,8 +109,16 @@ public class StackServiceResponse {
   public void setServiceName(String serviceName) {
     this.serviceName = serviceName;
   }
+
+  public String getServiceType() {
+    return serviceType;
+  }
+
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
+  }
 
   public String getServiceDisplayName() {
     return serviceDisplayName;
   }
 

+ 1 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java

@@ -410,15 +410,12 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
       } catch (TimeoutException e) {
         LOG.error("Generate client configs script was killed due to timeout ", e);
         throw new SystemException("Generate client configs script was killed due to timeout ", e);
-      } catch (InterruptedException e) {
+      } catch (InterruptedException | IOException e) {
         LOG.error("Failed to run generate client configs script for a component " + componentName, e);
         throw new SystemException("Failed to run generate client configs script for a component " + componentName, e);
       } catch (ExecutionException e) {
         LOG.error(e.getMessage(),e);
         throw new SystemException(e.getMessage() + " " + e.getCause());
-      } catch (IOException e) {
-        LOG.error("Failed to run generate client configs script for a component " + componentName, e);
-        throw new SystemException("Failed to run generate client configs script for a component " + componentName, e);
       }
 
     } catch (AmbariException e) {

+ 6 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceResourceProvider.java

@@ -40,6 +40,9 @@ public class StackServiceResourceProvider extends ReadOnlyResourceProvider {
 
   protected static final String SERVICE_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
       "StackServices", "service_name");
+
+  protected static final String SERVICE_TYPE_PROPERTY_ID = PropertyHelper.getPropertyId(
+      "StackServices", "service_type");
 
   public static final String STACK_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
       "StackServices", "stack_name");
@@ -124,6 +127,9 @@ public class StackServiceResourceProvider extends ReadOnlyResourceProvider {
 
       setResourceProperty(resource, SERVICE_NAME_PROPERTY_ID,
           response.getServiceName(), requestedIds);
+
+      setResourceProperty(resource, SERVICE_TYPE_PROPERTY_ID,
+          response.getServiceType(), requestedIds);
 
       setResourceProperty(resource, SERVICE_DISPLAY_NAME_PROPERTY_ID,
           response.getServiceDisplayName(), requestedIds);

+ 12 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java

@@ -56,6 +56,7 @@ public class ServiceInfo implements Validable{
   private String displayName;
   private String version;
   private String comment;
+  private String serviceType;
   private List<PropertyInfo> properties;
 
   @XmlElementWrapper(name="components")
@@ -253,8 +254,16 @@ public class ServiceInfo implements Validable{
   public void setDisplayName(String displayName) {
     this.displayName = displayName;
   }
+
+  public String getServiceType() {
+    return serviceType;
+  }
+
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
+  }
 
   public String getVersion() {
     return version;
   }
 
@@ -345,6 +354,8 @@ public class ServiceInfo implements Validable{
     StringBuilder sb = new StringBuilder();
     sb.append("Service name:");
     sb.append(name);
+    sb.append("\nService type:");
+    sb.append(serviceType); 
     sb.append("\nversion:");
     sb.append(version);
     sb.append("\ncomment:");

+ 4 - 3
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -2678,10 +2678,11 @@ public class ClusterImpl implements Cluster {
             serviceName = entry.getKey();
             break;
           } else if (!serviceName.equals(entry.getKey())) {
-            String error = "Updating configs for multiple services by a " +
-                "single API request isn't supported";
+            String error = String.format("Updating configs for multiple services by a " +
+                "single API request isn't supported. Conflicting services %s and %s for %s",
+                                         serviceName, entry.getKey(), config.getType());
             IllegalArgumentException exception = new IllegalArgumentException(error);
-            LOG.error(error + ", config version not created");
+            LOG.error(error + ", config version not created for {}", serviceName);
             throw exception;
           } else {
             break;

+ 25 - 2
ambari-server/src/main/python/ambari-server.py

@@ -39,11 +39,12 @@ from ambari_server.setupHttps import setup_https, setup_truststore
 from ambari_server.dbCleanup import db_cleanup
 from ambari_server.hostUpdate import update_host_names
 from ambari_server.checkDatabase import check_database
+from ambari_server.enableStack import enable_stack_version
 
 from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SYNC_ACTION, PSTART_ACTION, \
   REFRESH_STACK_HASH_ACTION, RESET_ACTION, RESTORE_ACTION, UPDATE_HOST_NAMES_ACTION, CHECK_DATABASE_ACTION, \
   SETUP_ACTION, SETUP_SECURITY_ACTION,START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, UPGRADE_STACK_ACTION, \
-  SETUP_JCE_ACTION, SET_CURRENT_ACTION,DB_CLEANUP_ACTION
+  SETUP_JCE_ACTION, SET_CURRENT_ACTION,DB_CLEANUP_ACTION, ENABLE_STACK_ACTION
 from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas
 from ambari_server.userInput import get_validated_string_input
 
@@ -378,6 +379,10 @@ def init_parser_options(parser):
   parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
   parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
   parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
+  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
+                    help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")
+  parser.add_option('--stack', dest="stack_name", default=None, type="string",
+                    help="Specify stack name for the stack versions that needs to be enabled")
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def are_cmd_line_db_args_blank(options):
@@ -525,7 +530,8 @@ def create_user_action_map(args, options):
         RESTORE_ACTION: UserActionPossibleArgs(restore, [1, 2], args),
         UPDATE_HOST_NAMES_ACTION: UserActionPossibleArgs(update_host_names, [2], args, options),
         CHECK_DATABASE_ACTION: UserAction(check_database, options),
-        DB_CLEANUP_ACTION: UserAction(db_cleanup, options)
+        DB_CLEANUP_ACTION: UserAction(db_cleanup, options),
+        ENABLE_STACK_ACTION: UserAction(enable_stack, options, args)
       }
   return action_map
 
@@ -636,6 +642,23 @@ def mainBody():
       print_error_msg("Unexpected {0}: {1}".format((e).__class__.__name__, str(e)) +\
       "\nFor more info run ambari-server with -v or --verbose option")
       sys.exit(1)     
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def enable_stack(options, args):
+  if options.stack_name is None:
+    print_error_msg("Please provide stack name using --stack option")
+    return -1
+  if options.stack_versions is None:
+    print_error_msg("Please provide stack version using --version option")
+    return -1
+  print_info_msg("Going to enable stack versions: " + str(options.stack_versions) + " for the stack: " + str(options.stack_name))
+  retcode = enable_stack_version(options.stack_name, options.stack_versions)
+  if retcode == 0:
+    status, pid = is_server_runing()
+    if status:
+      print "Restarting Ambari Server"
+      stop(options)
+      start(options)
       
 
 if __name__ == "__main__":
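
Taken together, a run of ambari-server enable-stack --stack HDP --version 2.3.ECS reduces to roughly the following (a sketch mirroring enable_stack above; option parsing and error paths elided, options as produced by init_parser_options):

    retcode = enable_stack_version("HDP", ["2.3.ECS"])
    if retcode == 0:
      status, pid = is_server_runing()
      if status:                 # stack metainfo is only reloaded on restart
        stop(options)
        start(options)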

+ 94 - 0
ambari-server/src/main/python/ambari_server/enableStack.py

@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import re
+import fileinput
+
+from ambari_commons.exceptions import FatalException
+from ambari_commons.logging_utils import print_info_msg, print_warning_msg, print_error_msg, get_verbose
+from ambari_commons.os_utils import is_root
+from ambari_server.serverConfiguration import get_ambari_properties, get_stack_location
+from ambari_server.serverUtils import is_server_runing
+
+#
+# Stack enable/disable
+#
+
+def enable_stack_version(stack_name, stack_versions):
+  if not is_root():
+    err = 'Ambari-server enable-stack should be run with ' \
+          'root-level privileges'
+    raise FatalException(4, err)
+
+  try:
+    print_info_msg("stack name requested: " + str(stack_name))
+    print_info_msg("stack version requested: " + str(stack_versions))
+  except IndexError:
+    raise FatalException(-1, "Invalid stack version passed")
+
+  retcode = update_stack_metainfo(stack_name,stack_versions)
+
+  if not retcode == 0:
+    raise FatalException(retcode, 'Stack enable request failed.')
+
+  return retcode
+
+def update_stack_metainfo(stack_name, stack_versions):
+  properties = get_ambari_properties()
+  if properties == -1:
+    print_error_msg("Error getting ambari properties")
+    return -1
+
+  stack_location = get_stack_location(properties)
+  print_info_msg ("stack location: "+ stack_location)
+
+  stack_root = os.path.join(stack_location, stack_name)
+  print_info_msg ("stack root: "+ stack_root)
+  if not os.path.exists(stack_root):
+    print_error_msg("stack directory does not exists: " + stack_root)
+    return -1
+
+  for stack in stack_versions:
+    if stack not in os.listdir(stack_root):
+      print_error_msg("The requested stack version: " + stack + " is not available in the " + stack_name + " stack")
+      return -1
+
+  for directory in os.listdir(stack_root):
+    print_info_msg("directory found: " + directory)
+    metainfo_file = os.path.join(stack_root, directory, "metainfo.xml")
+    print_info_msg("looking for metainfo file: " + metainfo_file)
+    if not os.path.exists(metainfo_file):
+      print_error_msg("Could not find metainfo file in the path " + metainfo_file)
+      continue
+    if directory in stack_versions:
+       print_info_msg ("updating stack to active for: " + directory )
+       replace(metainfo_file,"<active>false</active>","<active>true</active>")
+    else:
+       print_info_msg ("updating stack to inactive for: " + directory )
+       replace(metainfo_file,"<active>true</active>","<active>false</active>")
+  return 0
+
+def replace(file_path, pattern, subst):
+   for line in fileinput.input(file_path, inplace=1):
+      line = re.sub(pattern,subst, line.rstrip())
+      print(line)
+
+
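
The whole mechanism is a plain text substitution over each version's metainfo.xml under the stack root returned by get_stack_location (typically /var/lib/ambari-server/resources/stacks, though the actual path comes from the Ambari properties). For example:

    # flip HDP/2.3.ECS to active and every sibling HDP version to inactive
    update_stack_metainfo("HDP", ["2.3.ECS"])
    # per version directory this amounts to one of:
    #   replace(metainfo_file, "<active>false</active>", "<active>true</active>")
    #   replace(metainfo_file, "<active>true</active>", "<active>false</active>")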

+ 2 - 1
ambari-server/src/main/python/ambari_server/setupActions.py

@@ -40,4 +40,5 @@ CHECK_DATABASE_ACTION = "check-database"
 BACKUP_ACTION = "backup"
 RESTORE_ACTION = "restore"
 SETUP_JCE_ACTION = "setup-jce"
-DB_CLEANUP_ACTION = "db-cleanup"
+DB_CLEANUP_ACTION = "db-cleanup"
+ENABLE_STACK_ACTION = "enable-stack"

+ 5 - 1
ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py

@@ -177,6 +177,9 @@ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_nam
 
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
 # dfs.namenode.https-address
 import functools
 #create partial functions with common arguments for every HdfsResource call
@@ -193,5 +196,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )
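
The same three-line pattern repeats in every service params file that follows: read dfs_type out of commandParams, then bake it into the HdfsResource partial so individual call sites need no changes. Reduced to its essentials:

    import functools
    from resource_management.libraries.functions.default import default
    from resource_management.libraries.resources.hdfs_resource import HdfsResource

    # "" unless an HCFS service (e.g. ECS) is part of the cluster
    dfs_type = default("/commandParams/dfs_type", "")

    # every params.HdfsResource(...) call now carries dfs_type implicitly
    HdfsResource = functools.partial(HdfsResource, dfs_type=dfs_type)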

+ 4 - 2
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase.py

@@ -215,14 +215,16 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
                              type="directory",
                              action="create_on_execute",
                              owner=params.hbase_user,
-                             mode=0775
+                             mode=0775,
+                             dfs_type=params.dfs_type
         )
 
         params.HdfsResource(params.hbase_staging_dir,
                              type="directory",
                              action="create_on_execute",
                              owner=params.hbase_user,
-                             mode=0711
+                             mode=0711,
+                             dfs_type=params.dfs_type
         )
 
         params.HdfsResource(None, action="execute")

+ 3 - 0
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params_linux.py

@@ -48,3 +48,6 @@ hbase_conf_dir = "/etc/ams-hbase/conf"
 
 limits_conf_dir = "/etc/security/limits.d"
 sudo = AMBARI_SUDO_BINARY
+
+dfs_type = default("/commandParams/dfs_type", "")
+

+ 4 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py

@@ -112,6 +112,8 @@ dfs_data_mirroring_dir = "/apps/data-mirroring"
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -127,6 +129,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
  )
 

+ 5 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -227,6 +227,9 @@ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_nam
 
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -242,7 +245,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )
 
 # ranger host

+ 4 - 1
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -325,6 +325,8 @@ else:
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
@@ -340,7 +342,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )
 
 

+ 4 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py

@@ -456,6 +456,8 @@ security_param = "true" if security_enabled else "false"
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -471,7 +473,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
  )
 
 

+ 4 - 1
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py

@@ -75,6 +75,8 @@ log4j_props = config['configurations']['mahout-log4j']['content']
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -90,5 +92,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )

+ 5 - 1
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -260,6 +260,9 @@ hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_nam
 
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -275,7 +278,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )
 
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']

+ 4 - 1
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py

@@ -76,6 +76,8 @@ log4j_props = config['configurations']['pig-log4j']['content']
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -91,6 +93,7 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
  )
 

+ 3 - 1
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py

@@ -175,6 +175,7 @@ if has_spark_thriftserver and 'spark-thrift-sparkconf' in config['configurations
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 hdfs_site = config['configurations']['hdfs-site']
 
+dfs_type = default("/commandParams/dfs_type", "")
 
 import functools
 #create partial functions with common arguments for every HdfsResource call
@@ -191,5 +192,6 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
  )

+ 4 - 1
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py

@@ -80,6 +80,8 @@ tez_env_sh_template = config['configurations']['tez-env']['content']
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
@@ -95,7 +97,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
 )
 
 

+ 2 - 0
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py

@@ -123,11 +123,13 @@ class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
     params.HdfsResource(output_file,
                         action = "delete_on_execute",
                         type = "directory",
+                        dfs_type = params.dfs_type,
     )
     params.HdfsResource(input_file,
                         action = "create_on_execute",
                         type = "file",
                         source = "/etc/passwd",
+                        dfs_type = params.dfs_type,
     )
     params.HdfsResource(None, action="execute")
 

+ 5 - 2
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py

@@ -31,7 +31,6 @@ from resource_management.libraries.functions.version import format_hdp_stack_ver
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 
-
 import status_params
 
 # a map of the Ambari role to the component name
@@ -267,6 +266,9 @@ is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
 # Path to file that contains list of HDFS resources to be skipped during processing
 hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
 
+dfs_type = default("/commandParams/dfs_type", "")
+
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -282,7 +284,8 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs,
-  immutable_paths = get_not_managed_resources()
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
  )
 update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 

+ 9 - 8
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py

@@ -187,14 +187,15 @@ def yarn(name = None):
   # During RU, Core Masters and Slaves need hdfs-site.xml
   # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
   # RU should rely on all available in /usr/hdp/<version>/hadoop/conf
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig("hdfs-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=0644
+    )
 
   XmlConfig("mapred-site.xml",
             conf_dir=params.hadoop_conf_dir,

+ 4 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -26,6 +26,9 @@ from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.version import format_hdp_stack_version
 
 config = Script.get_config()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
 sudo = AMBARI_SUDO_BINARY
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
@@ -84,5 +87,5 @@ user_group = config['configurations']['cluster-env']['user_group']
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 has_namenode = not len(namenode_host) == 0
 
-if has_namenode:
+if has_namenode or dfs_type == 'HCFS':
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -54,7 +54,7 @@ def setup_config():
   else:
     Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
   
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0):
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
     # create core-site only if the hadoop config diretory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py

@@ -27,7 +27,7 @@ class BeforeAnyHook(Hook):
     env.set_params(params)
 
     setup_users()
-    if params.has_namenode:
+    if params.has_namenode or params.dfs_type == 'HCFS':
       setup_hadoop_env()
     setup_java()
 

+ 3 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -39,6 +39,8 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+dfs_type = default("/commandParams/dfs_type", "")
+
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jdk_name = default("/hostLevelParams/jdk_name", None)
 java_home = config['hostLevelParams']['java_home']
@@ -183,7 +185,7 @@ has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
 
-if has_namenode:
+if has_namenode or dfs_type == 'HCFS':
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
 
 hbase_tmp_dir = "/tmp/hbase-hbase"

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py

@@ -136,7 +136,8 @@ def set_uid(user, user_dirs):
 def setup_hadoop_env():
   import params
   stackversion = params.stack_version_unformatted
-  if params.has_namenode or stackversion.find('Gluster') >= 0:
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
     if params.security_enabled:
       tc_owner = "root"
     else:

+ 40 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -27,6 +27,8 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
 
 
 config = Script.get_config()
@@ -36,6 +38,11 @@ host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
+dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
+
+component_list = default("/localComponents", [])
+
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 
@@ -128,7 +135,7 @@ metrics_collection_period = default("/configurations/ams-site/timeline.metrics.s
 
 #hadoop params
 
-if has_namenode:
+if has_namenode or dfs_type == 'HCFS':
   hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
   task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
@@ -216,6 +223,38 @@ net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
 net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
 
+#Added logic to create /tmp and /user directory for HCFS stack.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  dfs_type = dfs_type
+)
+
+
 ##### Namenode RPC ports - metrics config section start #####
 
 # Figure out the rpc ports for current namenode

+ 27 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py

@@ -35,7 +35,7 @@ def setup_hadoop():
   )
 
   #directories
-  if params.has_namenode:
+  if params.has_namenode or params.dfs_type == 'HCFS':
     Directory(params.hdfs_log_dir_prefix,
               recursive=True,
               owner='root',
@@ -43,12 +43,13 @@ def setup_hadoop():
               mode=0775,
               cd_access='a',
     )
-    Directory(params.hadoop_pid_dir_prefix,
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
               recursive=True,
               owner='root',
               group='root',
               cd_access='a',
-    )
+      )
     Directory(params.hadoop_tmp_dir,
               recursive=True,
               owner=params.hdfs_user,
@@ -63,7 +64,7 @@ def setup_hadoop():
     # if WebHDFS is not enabled we need this jar to create hadoop folders.
     if params.host_sys_prepped:
       print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
       # for source-code of jar goto contrib/fast-hdfs-resource
       File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
            mode=0644,
@@ -103,6 +104,9 @@ def setup_hadoop():
            content=Template("hadoop-metrics2.properties.j2")
       )
 
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+      create_dirs()
+
 
 def setup_configs():
   """
@@ -110,7 +114,7 @@ def setup_configs():
   """
   import params
 
-  if params.has_namenode:
+  if params.has_namenode or params.dfs_type == 'HCFS':
     if os.path.exists(params.hadoop_conf_dir):
       File(params.task_log4j_properties_location,
            content=StaticFile("task-log4j.properties"),
@@ -151,3 +155,21 @@ def create_javahome_symlink():
          to="/usr/jdk64/jdk1.6.0_31",
     )
 
+def create_dirs():
+  import params
+  params.HdfsResource("/tmp",
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hdfs_user,
+                      mode=0777
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smoke_user,
+                      mode=params.smoke_hdfs_user_mode
+  )
+  params.HdfsResource(None,
+                      action="execute"
+  )
+
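In `create_dirs()` above, the two `create_on_execute` calls only queue the directory requests; nothing is created until the trailing `HdfsResource(None, action="execute")` flushes the batch (through WebHDFS or the fast-hdfs-resource jar, per the gating earlier in `setup_hadoop()`). A toy model of that queue-then-flush shape, with hypothetical names:

class BatchedResource(object):
    """Toy model of HdfsResource's queue-then-flush behavior."""
    pending = []  # shared queue; the real provider accumulates requests similarly

    def __init__(self, path, action, **kwargs):
        if action == "create_on_execute":
            BatchedResource.pending.append((path, kwargs))  # just record it
        elif action == "execute":
            for path, kwargs in BatchedResource.pending:    # flush everything
                print("creating %s with %s" % (path, kwargs))
            BatchedResource.pending = []

BatchedResource("/tmp", "create_on_execute", mode=0o777)
BatchedResource("/user/ambari-qa", "create_on_execute", mode=0o770)
BatchedResource(None, "execute")  # only now do the creates happen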

+ 23 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <active>false</active>
+    </versions>
+    <extends>2.3</extends>
+</metainfo>

+ 122 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml

@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos7</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11sp3/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/ubuntu12/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+  <os family="debian7">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/debian7/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/ubuntu14/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
+  </os>
+</reposinfo>

+ 10 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json

@@ -0,0 +1,10 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "ECS-SERVICE_CHECK": ["ECS-INSTALL"],
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
+
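As the `_comment` entries say, each key is a `ROLE-COMMAND` stage that must wait, and its value lists the stages that must complete first: the ECS service check waits for the ECS install, and RESOURCEMANAGER-START waits for ZOOKEEPER_SERVER-START. A small illustrative check against such a map (just the record format, not Ambari's actual scheduler):

import json

role_order = json.loads("""{
  "ECS-SERVICE_CHECK": ["ECS-INSTALL"],
  "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
}""")

def can_run(stage, finished):
    # A stage may run once all of its blockers have completed.
    return all(dep in finished for dep in role_order.get(stage, []))

print(can_run("ECS-SERVICE_CHECK", finished=set()))            # False
print(can_run("ECS-SERVICE_CHECK", finished={"ECS-INSTALL"}))  # True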

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ACCUMULO/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ACCUMULO</name>
+      <extends>common-services/ACCUMULO/1.6.1.2.2.0</extends>
+      <version>1.7.0.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ATLAS/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ATLAS</name>
+      <extends>common-services/ATLAS/0.1.0.2.3</extends>
+      <version>0.5.0.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 129 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml

@@ -0,0 +1,129 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- HDFS Configurations -->
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value></value>
+    <description>Provide VIPRFS bucket details using the format viprfs://$BUCKET_NAME.$NAMESPACE.$SITE_NAME_from_fs.vipr.installations</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>Supported values: simple, kerberos</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>Supported values: true, false</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+  </property>
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+  </property>
+
+  <!-- VIPRFS Configurations -->
+
+  <property>
+    <name>fs.vipr.installations</name>
+    <value>Site1</value>
+    <description>Provide the site name of the tenant</description>
+  </property>
+
+  <property>
+    <name>fs.vipr.installation.Site1.hosts</name>
+    <value></value>
+    <description>Provide ECS node IPs or a VIP</description>
+  </property>
+
+  <property>
+    <name>fs.vipr.installation.Site1.resolution</name>
+    <value>dynamic</value>
+  </property>
+
+  <property>
+    <name>fs.vipr.installation.Site1.resolution.dynamic.time_to_live_ms</name>
+    <value>900000</value>
+  </property>
+
+  <property>
+    <name>fs.viprfs.auth.anonymous_translation</name>
+    <value>LOCAL_USER</value>
+    <final>true</final>
+    <description>The only supported value is LOCAL_USER. Applicable only to insecure cluster deployments.</description>
+  </property>
+
+  <property>
+    <name>fs.viprfs.auth.identity_translation</name>
+    <value>NONE</value>
+    <description>Supported values are NONE (default), FIXED_REALM, and CURRENT_USER_REALM</description>
+  </property>
+
+  <!-- Moved to kerberos.json as this configuration applies only to secure clusters:
+  <property>
+    <name>viprfs.security.principal</name>
+    <value>NONE</value>
+    <description>Modify the value for a secure cluster setup. Provide the object engine security principal name using the format vipr/_HOST@ECS_REALM</description>
+  </property>
+  -->
+
+  <property>
+    <name>fs.viprfs.impl</name>
+    <value>com.emc.hadoop.fs.vipr.ViPRFileSystem</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.AbstractFileSystem.viprfs.impl</name>
+    <value>com.emc.hadoop.fs.vipr.ViPRAbstractFileSystem</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trace.viprfs.dfs.impl</name>
+    <value>com.emc.hadoop.fs.trace.TraceDistributedFileSystem</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trace.viprfs.dfs.inner</name>
+    <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.viprfs.dfs.impl</name>
+    <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
+    <final>true</final>
+  </property>
+
+</configuration>
+

+ 130 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml

@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>ECS bucket owner user</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+  </property>
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+
+export VIPRFS_LIBS=/usr/lib/hadoop/lib/*
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:${VIPRFS_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+  </property>
+
+</configuration>

+ 53 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json

@@ -0,0 +1,53 @@
+{
+  "services": [
+    {
+      "name": "ECS",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "hdfs",
+          "principal": {
+            "value": "${hadoop-env/hdfs_user}-${cluster_name}@${realm}",
+            "type" : "user" ,
+            "configuration": "hadoop-env/hdfs_principal_name",
+            "local_username" : "${hadoop-env/hdfs_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hdfs.headless.keytab",
+            "owner": {
+              "name": "${hadoop-env/hdfs_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hadoop-env/hdfs_user_keytab"
+          }
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "fs.viprfs.auth.identity_translation": "CURRENT_USER_REALM",
+            "viprfs.security.principal": "",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "ECS_CLIENT"
+        }
+       ]
+    }
+  ]
+}
+
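The `${...}` tokens in this descriptor are Ambari kerberos-descriptor variables: `${hadoop-env/hdfs_user}` dereferences a property of the named config type, while `${cluster_name}` and `${realm}` come from the cluster context, so the hdfs principal expands to something like hdfs-mycluster@EXAMPLE.COM. A toy resolver for that substitution syntax (illustrative only, with made-up context values):

import re

context = {
    "hadoop-env/hdfs_user": "hdfs",
    "cluster_name": "mycluster",
    "realm": "EXAMPLE.COM",
}

def resolve(template):
    # Replace each ${name} token with its value from the context map.
    return re.sub(r"\$\{([^}]+)\}", lambda m: context[m.group(1)], template)

print(resolve("${hadoop-env/hdfs_user}-${cluster_name}@${realm}"))
# -> hdfs-mycluster@EXAMPLE.COM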

+ 84 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml

@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ECS</name>
+      <displayName>ECS</displayName>
+      <serviceType>HCFS</serviceType>
+      <comment>Hadoop Compatible File System Client for ECS</comment>
+      <version>2.3.0.0</version>
+      <components>
+        <component>
+          <name>ECS_CLIENT</name>
+          <displayName>ECS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/ecs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFile>
+            <type>xml</type>
+            <fileName>core-site.xml</fileName>
+            <dictionaryName>core-site</dictionaryName>
+          </configFile>
+          <configFile>
+            <type>xml</type>
+            <fileName>hdfs-site.xml</fileName>
+            <dictionaryName>hdfs-site</dictionaryName>
+          </configFile>
+          <configFile>
+            <type>env</type>
+            <fileName>hadoop-env.sh</fileName>
+            <dictionaryName>hadoop-env</dictionaryName>
+          </configFile>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+            <package>
+              <name>viprfs-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

+ 112 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py

@@ -0,0 +1,112 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import os
+from resource_management import *
+
+class ECSClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    self.setup_config(env)
+    self.setup_hadoop_env(env)
+
+  def createdirs(self, env):
+    self.create_dirs(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def setup_config(self, env):
+    import params
+    env.set_params(params)
+    stackversion = params.stack_version_unformatted
+
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+    XmlConfig("hdfs-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+    File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
+    )
+
+  def setup_hadoop_env(self, env):
+    import params
+    env.set_params(params)
+    stackversion = params.stack_version_unformatted
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=0777
+    )
+
+  def create_dirs(self, env):
+    import params
+    env.set_params(params)
+    params.HdfsResource("/tmp",
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.hdfs_user,
+                        mode=0777
+    )
+    params.HdfsResource(params.smoke_hdfs_user_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.smoke_user,
+                        mode=params.smoke_hdfs_user_mode
+    )
+    params.HdfsResource(None,
+                        action="execute"
+    )
+
+if __name__ == "__main__":
+  ECSClient().execute()
+
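One detail worth noting in `setup_config()` above: the `only_if=format("ls {hadoop_conf_dir}")` guard turns each `XmlConfig` into a no-op unless the shell probe succeeds, so the ECS client only renders core-site.xml and hdfs-site.xml once the hadoop conf directory exists. In plain Python the guard amounts to roughly this (a sketch with a hypothetical renderer, not the resource_management implementation):

import os

def write_config_if_present(conf_dir, filename, render):
    # Equivalent of XmlConfig(..., only_if="ls <conf_dir>"): skip quietly
    # when the probe fails, write the rendered config when it succeeds.
    if not os.path.isdir(conf_dir):
        return False
    with open(os.path.join(conf_dir, filename), "w") as f:
        f.write(render())
    return True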

+ 82 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py

@@ -0,0 +1,82 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+import os
+import itertools
+import re
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import hdp_select
+
+config = Script.get_config()
+
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+jdk_location = config['hostLevelParams']['jdk_location']
+
+#hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = functions.get_kinit_path()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+java64_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hadoop_dir = "/etc/hadoop"
+user_group = config['configurations']['cluster-env']['user_group']
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+tmp_dir = Script.get_tmp_dir()
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+
+import functools
+#create a partial function that binds the common arguments for every HdfsResource call;
+#to create/delete/copy-from-local hdfs directories and files, call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  dfs_type = dfs_type
+)

+ 47 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py

@@ -0,0 +1,47 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class ECSServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    # run an fs -ls command to make sure the ECS client can talk to the ECS backend
+    list_command = format("fs -ls /")
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+
+    ExecuteHadoop(list_command,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
+    )
+
+
+if __name__ == "__main__":
+  ECSServiceCheck().execute()
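The check reduces to listing the filesystem root as the hdfs user, with `tries=20` and `try_sleep=3` absorbing a slow ECS endpoint; hand-rolled, the retry loop looks roughly like this (a sketch, not what `ExecuteHadoop` actually runs internally):

import subprocess
import time

def ecs_service_check(conf_dir="/etc/hadoop/conf", tries=20, try_sleep=3):
    for attempt in range(tries):
        # Same command the service check issues: hadoop fs -ls /
        rc = subprocess.call(["hadoop", "--config", conf_dir, "fs", "-ls", "/"])
        if rc == 0:
            return True        # the ECS client reached the ECS backend
        time.sleep(try_sleep)  # back off briefly before retrying
    return False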

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/FALCON/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <version>0.6.1.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/FLUME/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.5.2.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 107 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml

@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Add ECS Client Classpath
+export VIPRFS_LIBS=/usr/lib/hadoop/lib/*
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}:${VIPRFS_LIBS}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with the proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if java_version &lt; 8 %}
+JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m"
+{% endif %}      
+      
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export PHOENIX_QUERYSERVER_OPTS="$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} $JDK_DEPENDED_OPTS"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+   <property>
+       <name>hbase.rootdir</name>
+       <value></value>
+   </property>
+</configuration>

+ 132 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json

@@ -0,0 +1,132 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}-${cluster_name}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+            "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PHOENIX_QUERY_SERVER",
+          "identities": [
+            {
+              "name": "hbase_queryserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/phoenix.queryserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/phoenix.queryserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+

+ 58 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/metainfo.xml

@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <version>1.1.1.2.3</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <timelineAppid>HBASE</timelineAppid>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>ECS</service>
+      </requiredServices>
+    </service>
+  </services>
+</metainfo>
+

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HDFS/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 91 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HIVE/metainfo.xml

@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>1.2.1.2.3</version>
+      <components>
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>PIG/PIG</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>YARN</service>
+        <service>TEZ</service>
+      </requiredServices>
+
+    </service>
+  </services>
+</metainfo>
+

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KAFKA/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.8.2.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KERBEROS/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+      <extends>common-services/KERBEROS/1.10.3-10</extends>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/KNOX/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <version>0.6.0.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/MAHOUT/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+      <service>
+        <name>MAHOUT</name>
+        <version>0.9.0.2.3</version>
+        <extends>common-services/MAHOUT/1.0.0.2.3</extends>
+        <deleted>true</deleted>
+      </service>
+    </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/OOZIE/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <extends>common-services/OOZIE/4.2.0.2.3</extends>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 32 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/RANGER/metainfo.xml

@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER</name>
+      <displayName>Ranger</displayName>
+      <comment>Comprehensive security for Hadoop</comment>
+      <version>0.5.0.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 30 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/RANGER_KMS/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER_KMS</name>
+      <extends>common-services/RANGER_KMS/0.5.0.2.3</extends>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SLIDER/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <version>0.80.0.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 30 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SPARK/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+          <name>SPARK</name>
+          <version>1.3.1.2.3</version>
+          <deleted>true</deleted>
+        </service>
+    </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/SQOOP/metainfo.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <version>1.4.6.2.3</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 28 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/STORM/metainfo.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>0.10.0</version>
+      <deleted>true</deleted>
+    </service>
+  </services>
+</metainfo>

+ 27 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>tez.cluster.additional.classpath.prefix</name>
+    <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure:/usr/lib/hadoop/lib/*</value>
+    <description></description>
+  </property>
+
+</configuration>

+ 59 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/metainfo.xml

@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TEZ</name>
+      <version>0.7.0.2.3</version>
+      <components>
+        <component>
+          <name>TEZ_CLIENT</name>
+          <displayName>Tez Client</displayName>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <category>CLIENT</category>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+    </service>
+  </services>
+</metainfo>
+

+ 34 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml

@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure:/usr/lib/hadoop/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+  </property>
+
+</configuration>

+ 29 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*,/usr/lib/hadoop/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+</configuration>
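Note: the three classpath properties added in this stack (tez.cluster.additional.classpath.prefix
and mapreduce.application.classpath above, yarn.application.classpath here) all end with
/usr/lib/hadoop/lib/*; presumably that is where the ECS client jars land on each host,
though the patch itself does not say so. A trivial illustration of the shared suffix
(values abbreviated):

    # Abbreviated copies of the property values added above; illustration only.
    tez_prefix = ("/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:"
                  "/etc/hadoop/conf/secure:/usr/lib/hadoop/lib/*")
    yarn_cp = "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/lib/hadoop/lib/*"
    assert tez_prefix.split(":")[-1] == "/usr/lib/hadoop/lib/*"
    assert yarn_cp.split(",")[-1] == "/usr/lib/hadoop/lib/*"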

+ 215 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json

@@ -0,0 +1,215 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.yarn.groups": "*",
+            "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+
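Note: the substantive change versus the stock YARN descriptor is the /ECS/hdfs identity
reference. Paths of the form /SERVICE/identity resolve against that service's own
kerberos.json, so YARN and MAPREDUCE2 here reuse the hdfs identity defined by ECS in
place of the usual /HDFS/hdfs reference. A quick sanity check of the file added above
(the relative path assumes the repository root as working directory):

    import json

    path = "ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json"
    with open(path) as f:
        descriptor = json.load(f)

    yarn = next(s for s in descriptor["services"] if s["name"] == "YARN")
    identity_names = [i["name"] for i in yarn["identities"]]
    # YARN borrows the hdfs identity from the ECS service, not from HDFS.
    assert "/ECS/hdfs" in identity_names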

+ 145 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/metainfo.xml

@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.1.2.3</version>
+      <requiredServices>
+        <service>ECS</service>
+        <service>MAPREDUCE2</service>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      <components>
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <version>2.7.1.2.3</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>ECS/ECS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+        </component>
+      </components>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+    </service>
+
+  </services>
+</metainfo>
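Note: MAPREDUCE2 sets <configuration-dir>configuration-mapred</configuration-dir>, which
is why the mapred-site.xml in this patch lives under configuration-mapred/ while
yarn-site.xml sits in the default configuration/ directory. A trivial layout check, run
from the repository root (illustrative only):

    import os

    base = "ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN"
    # Both files are added by this patch, one under the overridden config dir.
    assert os.path.isfile(os.path.join(base, "configuration-mapred", "mapred-site.xml"))
    assert os.path.isfile(os.path.join(base, "configuration", "yarn-site.xml"))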

+ 51 - 0
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ZOOKEEPER/metainfo.xml

@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_2_3_*</name>
+            </package>
+            <package>
+              <name>zookeeper_2_3_*-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper-2-3-.*</name>
+            </package>
+            <package>
+              <name>zookeeper-2-3-.*-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>
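Note: the two osSpecific blocks above use different pattern dialects. The RPM-based
families get shell-style globs (zookeeper_2_3_*), while the Debian/Ubuntu families are
written as regular expressions (zookeeper-2-3-.*), matching the convention used elsewhere
in the HDP stacks. An illustrative check (the concrete package names are made up; real
builds embed the full build number):

    import fnmatch
    import re

    # Hypothetical package names for each platform.
    assert fnmatch.fnmatch("zookeeper_2_3_4_0_3485", "zookeeper_2_3_*")
    assert re.match("zookeeper-2-3-.*", "zookeeper-2-3-4-0-3485")
    assert re.match("zookeeper-2-3-.*-server", "zookeeper-2-3-4-0-3485-server")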

+ 17 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -177,7 +177,7 @@ public class AmbariManagementControllerTest {
   private static final String PROPERTY_NAME = "hbase.regionserver.msginterval";
   private static final String SERVICE_NAME = "HDFS";
   private static final String FAKE_SERVICE_NAME = "FAKENAGIOS";
-  private static final int STACK_VERSIONS_CNT = 14;
+  private static final int STACK_VERSIONS_CNT = 15;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 3;
   private static final int STACK_PROPERTIES_CNT = 103;
@@ -4215,6 +4215,13 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("a1", task.getRole().name());
     Assert.assertEquals("h1", task.getHostName());
     ExecutionCommand cmd = task.getExecutionCommandWrapper().getExecutionCommand();
+    // h1 has only DATANODE, NAMENODE, CLIENT sch's
+    Assert.assertEquals("h1", cmd.getHostname());
+    Assert.assertFalse(cmd.getLocalComponents().isEmpty());
+    Assert.assertTrue(cmd.getLocalComponents().contains(Role.DATANODE.name()));
+    Assert.assertTrue(cmd.getLocalComponents().contains(Role.NAMENODE.name()));
+    Assert.assertTrue(cmd.getLocalComponents().contains(Role.HDFS_CLIENT.name()));
+    Assert.assertFalse(cmd.getLocalComponents().contains(Role.RESOURCEMANAGER.name()));
     Type type = new TypeToken<Map<String, String>>(){}.getType();
     Map<String, String> hostParametersStage = StageUtils.getGson().fromJson(stage.getHostParamsStage(), type);
     Map<String, String> commandParametersStage = StageUtils.getGson().fromJson(stage.getCommandParamsStage(), type);
@@ -4267,6 +4274,13 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("HDFS", cmd.getServiceName());
     Assert.assertEquals("DATANODE", cmd.getComponentName());
     Assert.assertEquals(requestProperties.get(REQUEST_CONTEXT_PROPERTY), response.getRequestContext());
+    // h2 has only DATANODE sch
+    Assert.assertEquals("h2", cmd.getHostname());
+    Assert.assertFalse(cmd.getLocalComponents().isEmpty());
+    Assert.assertTrue(cmd.getLocalComponents().contains(Role.DATANODE.name()));
+    Assert.assertFalse(cmd.getLocalComponents().contains(Role.NAMENODE.name()));
+    Assert.assertFalse(cmd.getLocalComponents().contains(Role.HDFS_CLIENT.name()));
+    Assert.assertFalse(cmd.getLocalComponents().contains(Role.RESOURCEMANAGER.name()));
 
     hosts = new ArrayList<String>() {{add("h3");}};
     resourceFilters.clear();
@@ -10331,6 +10345,7 @@ public class AmbariManagementControllerTest {
     Assert.assertTrue(commandParamsStage.containsKey("some_custom_param"));
     Assert.assertEquals(null, cmd.getServiceName());
     Assert.assertEquals(null, cmd.getComponentName());
+    Assert.assertTrue(cmd.getLocalComponents().isEmpty());
 
     Assert.assertEquals(requestProperties.get(REQUEST_CONTEXT_PROPERTY), response.getRequestContext());
 
@@ -10373,6 +10388,7 @@ public class AmbariManagementControllerTest {
     Assert.assertTrue(commandParamsStage.containsKey("some_custom_param"));
     Assert.assertEquals(null, cmd.getServiceName());
     Assert.assertEquals(null, cmd.getComponentName());
+    Assert.assertTrue(cmd.getLocalComponents().isEmpty());
 
     Assert.assertEquals(requestProperties.get(REQUEST_CONTEXT_PROPERTY), response.getRequestContext());
   }

+ 13 - 2
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java

@@ -111,18 +111,29 @@ public class StackManagerTest {
   @Test
   public void testGetsStacks() throws Exception {
     Collection<StackInfo> stacks = stackManager.getStacks();
-    assertEquals(18, stacks.size());
+    assertEquals(19, stacks.size());
   }
 
   @Test
   public void testGetStacksByName() {
     Collection<StackInfo> stacks = stackManager.getStacks("HDP");
-    assertEquals(14, stacks.size());
+    assertEquals(15, stacks.size());
 
     stacks = stackManager.getStacks("OTHER");
     assertEquals(2, stacks.size());
   }
 
+  @Test
+  public void testHCFSServiceType() {
+
+    StackInfo stack = stackManager.getStack("HDP", "2.2.0.ECS");
+    ServiceInfo service = stack.getService("ECS");
+    assertEquals("HCFS", service.getServiceType());
+
+    service = stack.getService("HDFS");
+    assertNull(service);
+  }
+
   @Test
   public void testGetStack() {
     StackInfo stack = stackManager.getStack("HDP", "0.1");

+ 8 - 0
ambari-server/src/test/java/org/apache/ambari/server/state/ServiceInfoTest.java

@@ -62,6 +62,13 @@ public class ServiceInfoTest {
         "      <comment>Apache Hadoop Distributed File System</comment>\n" +
         "      <version>2.1.0.2.0</version>\n" +
         "    </service>\n" +
+        "    <service>\n" +
+        "      <name>HCFS_SERVICE</name>\n" +
+        "      <displayName>HCFS_SERVICE</displayName>\n" +
+        "      <comment>Hadoop Compatible File System</comment>\n" +
+        "      <version>2.1.1.0</version>\n" +
+        "      <serviceType>HCFS</serviceType>\n" +
+        "    </service>\n" +
         "  </services>\n" +
         "</metainfo>\n";
     
@@ -70,6 +77,7 @@ public class ServiceInfoTest {
     assertTrue(serviceInfoMap.get("RESTART").isRestartRequiredAfterRackChange());
     assertFalse(serviceInfoMap.get("NO_RESTART").isRestartRequiredAfterRackChange());
     assertNull(serviceInfoMap.get("DEFAULT_RESTART").isRestartRequiredAfterRackChange());
+    assertEquals("HCFS", serviceInfoMap.get("HCFS_SERVICE").getServiceType());
   }
 
   @Test

+ 2 - 0
ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py

@@ -327,6 +327,7 @@ class TestMetricsCollector(RMFTestCase):
                                   keytab = UnknownConfigurationMock(),
                                   kinit_path_local = '/usr/bin/kinit',
                                   user = 'hdfs',
+                                  dfs_type = '',
                                   owner = 'ams',
                                   mode = 0775,
                                   hadoop_conf_dir = '/etc/hadoop/conf',
@@ -343,6 +344,7 @@ class TestMetricsCollector(RMFTestCase):
                                   keytab = UnknownConfigurationMock(),
                                   kinit_path_local = '/usr/bin/kinit',
                                   user = 'hdfs',
+                                  dfs_type = '',
                                   owner = 'ams',
                                   mode = 0711,
                                   hadoop_conf_dir = '/etc/hadoop/conf',
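Note: the recurring dfs_type = '' in these test expectations (and in the HDFS, HBase,
Hive, and Oozie tests below) reflects the new HdfsResource parameter this patch threads
through the service scripts (see the hdfs_resource.py and params changes in the file
list), so that an HCFS such as ECS can be distinguished from plain HDFS. A hedged sketch
of how a params script would surface it; the exact command-parameter key and the "HCFS"
value are assumptions based on the test values here:

    # Hypothetical excerpt in the spirit of the params_linux.py changes in this patch.
    from resource_management.libraries.functions.default import default

    # Empty for plain HDFS clusters; e.g. "HCFS" when the ECS stack is in use (assumed).
    dfs_type = default("/commandParams/dfs_type", "")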

+ 9 - 0
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py

@@ -337,6 +337,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -350,6 +351,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
@@ -364,6 +366,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -475,6 +478,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -488,6 +492,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
@@ -502,6 +507,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -624,6 +630,7 @@ class TestHBaseMaster(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -639,6 +646,7 @@ class TestHBaseMaster(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -657,6 +665,7 @@ class TestHBaseMaster(RMFTestCase):
         user = 'hdfs',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        dfs_type = ''
     )
 
     self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-master/conf start master',

+ 27 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py

@@ -106,6 +106,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -123,6 +124,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -140,6 +142,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -219,6 +222,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -236,6 +240,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -253,6 +258,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -344,6 +350,7 @@ class TestNamenode(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -358,6 +365,7 @@ class TestNamenode(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -373,6 +381,7 @@ class TestNamenode(RMFTestCase):
         hadoop_bin_dir = '/usr/bin',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -444,6 +453,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -461,6 +471,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -478,6 +489,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -541,6 +553,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -558,6 +571,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -575,6 +589,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -644,6 +659,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -661,6 +677,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -678,6 +695,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -747,6 +765,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -764,6 +783,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -781,6 +801,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -849,6 +870,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -866,6 +888,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -883,6 +906,7 @@ class TestNamenode(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -959,6 +983,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = None,
                               user = 'hdfs',
+                              dfs_type = '',
                               owner = 'hdfs',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
@@ -976,6 +1001,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = None,
                               user = 'hdfs',
+                              dfs_type = '',
                               owner = 'ambari-qa',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
@@ -993,6 +1019,7 @@ class TestNamenode(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = None,
                               user = 'hdfs',
+                              dfs_type = '',
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )

+ 4 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py

@@ -70,6 +70,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -85,6 +86,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -100,6 +102,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -114,6 +117,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = None,
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )

+ 14 - 0
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py

@@ -338,6 +338,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -351,6 +352,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -369,6 +371,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -382,6 +385,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -396,6 +400,7 @@ class TestHiveServer(RMFTestCase):
           keytab = UnknownConfigurationMock(),
           kinit_path_local = '/usr/bin/kinit',
           user = 'hdfs',
+          dfs_type = '',
           owner = 'hive',
           group = 'hdfs',
           hadoop_bin_dir = '/usr/bin',
@@ -410,6 +415,7 @@ class TestHiveServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -526,6 +532,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -539,6 +546,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hcat',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -553,6 +561,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -566,6 +575,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hive',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -579,6 +589,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hive',
         group = 'hdfs',
         hadoop_bin_dir = '/usr/bin',
@@ -593,6 +604,7 @@ class TestHiveServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -927,6 +939,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
@@ -969,6 +982,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
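
Every hunk in this diff applies the same mechanical change: each expected HdfsResource call in the stack tests gains a dfs_type = '' keyword, mirroring the attribute the commit adds to the resource itself. The keyword lands at a different position in different hunks; since these are Python keyword arguments matched by name, the position is immaterial. A minimal sketch of the updated expectation pattern (the resource path and surrounding values are placeholders, not taken from any one hunk):

    # Illustrative only: the shape of an expectation after this change.
    # All concrete values (path, user, owner, conf dir) are placeholders.
    self.assertResourceCalled('HdfsResource', '/apps/example',
        type = 'directory',
        action = ['create_on_execute'],
        user = 'hdfs',
        dfs_type = '',  # new keyword; '' keeps the stock HDFS behavior
        owner = 'example-user',
        hadoop_conf_dir = '/etc/hadoop/conf',
    )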

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py

@@ -101,6 +101,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'missing_principal',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -117,6 +118,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'missing_principal',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -132,6 +134,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'missing_principal',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -214,6 +217,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -230,6 +234,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -247,6 +252,7 @@ class TestServiceCheck(RMFTestCase):
         user = 'hdfs',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
+        dfs_type = ''
     )
     self.assertResourceCalled('Execute', '/tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa 50111 idtest.ambari-qa.1431110511.43.pig /etc/security/keytabs/smokeuser.headless.keytab true /usr/bin/kinit ambari-qa@EXAMPLE.COM',
         logoutput = True,

+ 16 - 0
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py

@@ -71,6 +71,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'oozie',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -84,6 +85,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -285,6 +287,7 @@ class TestOozieServer(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = UnknownConfigurationMock(),
                               user = 'hdfs',
+                              dfs_type = '',
                               owner = 'oozie',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
@@ -301,6 +304,7 @@ class TestOozieServer(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = UnknownConfigurationMock(),
                               user = 'hdfs',
+                              dfs_type = '',
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )
@@ -510,6 +514,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        dfs_type = '',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
@@ -529,6 +534,7 @@ class TestOozieServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -605,6 +611,7 @@ class TestOozieServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        dfs_type = '',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
@@ -626,6 +633,7 @@ class TestOozieServer(RMFTestCase):
         user = 'hdfs',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
+        dfs_type = ''
     )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-start.sh',
         environment = {'OOZIE_CONFIG': '/etc/oozie/conf'},
@@ -660,6 +668,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'oozie',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
@@ -673,6 +682,7 @@ class TestOozieServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -844,6 +854,7 @@ class TestOozieServer(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'oozie',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
@@ -860,6 +871,7 @@ class TestOozieServer(RMFTestCase):
         user = 'hdfs',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
+        dfs_type = ''
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
                               owner = 'oozie',
@@ -1386,6 +1398,7 @@ class TestOozieServer(RMFTestCase):
       keytab = UnknownConfigurationMock(),
       default_fs = 'hdfs://c6401.ambari.apache.org:8020',
       user = 'hdfs',
+      dfs_type = '',
       hdfs_site = UnknownConfigurationMock(),
       kinit_path_local = '/usr/bin/kinit',
       principal_name = UnknownConfigurationMock(),
@@ -1407,6 +1420,7 @@ class TestOozieServer(RMFTestCase):
       kinit_path_local = '/usr/bin/kinit',
       principal_name = UnknownConfigurationMock(),
       user = 'hdfs',
+      dfs_type = '',
       action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
       hadoop_conf_dir = '/usr/hdp/2.3.0.0-1234/hadoop/conf' )
 
@@ -1459,6 +1473,7 @@ class TestOozieServer(RMFTestCase):
       keytab = UnknownConfigurationMock(),
       default_fs = 'hdfs://c6401.ambari.apache.org:8020',
       user = 'hdfs',
+      dfs_type = '',
       hdfs_site = UnknownConfigurationMock(),
       kinit_path_local = '/usr/bin/kinit',
       principal_name = UnknownConfigurationMock(),
@@ -1480,6 +1495,7 @@ class TestOozieServer(RMFTestCase):
       kinit_path_local = '/usr/bin/kinit',
       principal_name = UnknownConfigurationMock(),
       user = 'hdfs',
+      dfs_type = '',
       action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
       hadoop_conf_dir = '/usr/hdp/2.3.0.0-1234/hadoop/conf' )
 

+ 5 - 0
ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py

@@ -90,6 +90,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -102,6 +103,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '//examples',
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -115,6 +117,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -127,6 +130,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '//examples/input-data',
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -140,6 +144,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
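
For context on where the empty string originates: the service params scripts read the value from the command parameters with an empty-string fallback and then hand it to every HdfsResource call. A hedged sketch of that read (the exact key path is an assumption, based on the usual Ambari commandParams pattern):

    from resource_management.libraries.functions.default import default

    # Assumption: dfs_type arrives via commandParams; '' when the stack does not set it.
    dfs_type = default('/commandParams/dfs_type', '')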

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py

@@ -43,6 +43,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -59,6 +60,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -74,6 +76,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -116,6 +119,7 @@ class TestPigServiceCheck(RMFTestCase):
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
+        dfs_type = '',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -133,6 +137,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -148,6 +153,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )

+ 17 - 1
ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py

@@ -63,6 +63,7 @@ class TestHistoryServer(RMFTestCase):
                           type="directory",
                           action=["create_on_execute"], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                           user=u"hdfs",
+                          dfs_type = '',
                           owner=u"tez",
                           mode=493,
                           hadoop_bin_dir="/usr/bin",
@@ -81,6 +82,7 @@ class TestHistoryServer(RMFTestCase):
                               action=["create_on_execute"], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               user=u'hdfs',
                               owner=u"tez",
+                              dfs_type = '',
                               mode=493,
                               hadoop_bin_dir="/usr/bin",
                               hadoop_conf_dir="/etc/hadoop/conf",
@@ -97,6 +99,7 @@ class TestHistoryServer(RMFTestCase):
                               action=['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               user=u'hdfs',
                               hadoop_bin_dir="/usr/bin",
+                              dfs_type = '',
                               hadoop_conf_dir="/etc/hadoop/conf",
                               hdfs_site=self.getConfig()["configurations"]["hdfs-site"],
                               default_fs=u'hdfs://c6401.ambari.apache.org:8020',
@@ -200,6 +203,7 @@ class TestHistoryServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         kinit_path_local = '/usr/bin/kinit',
         recursive_chmod = True,
         owner = 'yarn',
@@ -219,6 +223,7 @@ class TestHistoryServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'yarn',
         group = 'hadoop',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -231,7 +236,8 @@ class TestHistoryServer(RMFTestCase):
         hadoop_bin_dir = '/usr/bin',
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
        user = 'hdfs',
+        dfs_type = '',
         owner = 'mapred',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -244,6 +250,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -257,6 +264,7 @@ class TestHistoryServer(RMFTestCase):
         change_permissions_for_parents = True,
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'mapred',
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
@@ -271,6 +279,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -448,6 +457,7 @@ class TestHistoryServer(RMFTestCase):
         hadoop_conf_dir = '/etc/hadoop/conf',
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         user = 'hdfs',
+        dfs_type = '',
         kinit_path_local = '/usr/bin/kinit',
         recursive_chmod = True,
         owner = 'yarn',
@@ -467,6 +477,7 @@ class TestHistoryServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'yarn',
         group = 'hadoop',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -480,6 +491,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'mapred',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -492,6 +504,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -505,6 +518,7 @@ class TestHistoryServer(RMFTestCase):
         change_permissions_for_parents = True,
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'mapred',
         group = 'hadoop',
         hadoop_bin_dir = '/usr/bin',
@@ -519,6 +533,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -848,6 +863,7 @@ class TestHistoryServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )

+ 6 - 0
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py

@@ -47,6 +47,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -59,6 +60,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -70,6 +72,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -104,6 +107,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -116,6 +120,7 @@ class TestServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/etc/passwd',
         user = 'hdfs',
+        dfs_type = '',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -127,6 +132,7 @@ class TestServiceCheck(RMFTestCase):
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )

+ 6 - 0
ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py

@@ -139,6 +139,7 @@ class TestFalconServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'falcon',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -156,6 +157,7 @@ class TestFalconServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'falcon',
         group='users',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -174,6 +176,7 @@ class TestFalconServer(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -325,6 +328,7 @@ class TestFalconServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'falcon',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -343,6 +347,7 @@ class TestFalconServer(RMFTestCase):
         source = '/usr/hdp/current/falcon-server/data-mirroring',
         default_fs = 'hdfs://c6401.ambari.apache.org:8020',
         user = 'hdfs',
+        dfs_type = '',
         hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
@@ -365,6 +370,7 @@ class TestFalconServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )

+ 8 - 0
ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py

@@ -46,6 +46,7 @@ class TestTezServiceCheck(RMFTestCase):
       keytab = UnknownConfigurationMock(),
       kinit_path_local = '/usr/bin/kinit',
       user = 'hdfs',
+      dfs_type = '',
       hadoop_conf_dir = '/etc/hadoop/conf',
       type = 'directory',
       action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
@@ -58,6 +59,7 @@ class TestTezServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
@@ -71,6 +73,7 @@ class TestTezServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/tmp/sample-tez-test',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'file',
@@ -83,6 +86,7 @@ class TestTezServiceCheck(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
@@ -126,6 +130,7 @@ class TestTezServiceCheck(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = 'hdfs',
                               user = 'hdfs',
+                              dfs_type = '',
                               action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
@@ -140,6 +145,7 @@ class TestTezServiceCheck(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = 'hdfs',
                               user = 'hdfs',
+                              dfs_type = '',
                               owner = 'ambari-qa',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'directory',
@@ -156,6 +162,7 @@ class TestTezServiceCheck(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = 'hdfs',
                               user = 'hdfs',
+                              dfs_type = '',
                               owner = 'ambari-qa',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               type = 'file',
@@ -171,6 +178,7 @@ class TestTezServiceCheck(RMFTestCase):
                               kinit_path_local = '/usr/bin/kinit',
                               principal_name = 'hdfs',
                               user = 'hdfs',
+                              dfs_type = '',
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )

+ 1 - 0
ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py

@@ -200,6 +200,7 @@ class TestAppTimelineServer(RMFTestCase):
                               user = 'hdfs',
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
+                              dfs_type = ''
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
                               owner = 'yarn',

+ 6 - 0
ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py

@@ -48,6 +48,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -64,6 +65,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
@@ -79,6 +81,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
@@ -111,6 +114,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -127,6 +131,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
@@ -144,6 +149,7 @@ class TestPigServiceCheck(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = 'hdfs@EXAMPLE.COM',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )

+ 6 - 0
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py

@@ -61,6 +61,7 @@ class TestJobHistoryServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
@@ -125,6 +126,7 @@ class TestJobHistoryServer(RMFTestCase):
         kinit_path_local='/usr/bin/kinit',
         principal_name=UnknownConfigurationMock(),
         security_enabled=True,
+        dfs_type = '',
         user=UnknownConfigurationMock()
     )
 
@@ -175,6 +177,7 @@ class TestJobHistoryServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         owner = 'spark',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -191,6 +194,7 @@ class TestJobHistoryServer(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )
@@ -248,6 +252,7 @@ class TestJobHistoryServer(RMFTestCase):
         user = UnknownConfigurationMock(),
         owner = 'spark',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        dfs_type = '',
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0775,
@@ -264,6 +269,7 @@ class TestJobHistoryServer(RMFTestCase):
         user = UnknownConfigurationMock(),
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        dfs_type = '',
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         owner = 'spark',

+ 4 - 0
ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py

@@ -45,6 +45,7 @@ class TestMahoutClient(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -56,6 +57,7 @@ class TestMahoutClient(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'directory',
@@ -69,6 +71,7 @@ class TestMahoutClient(RMFTestCase):
         kinit_path_local = '/usr/bin/kinit',
         source = '/tmp/sample-mahout-test.txt',
         user = 'hdfs',
+        dfs_type = '',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
         type = 'file',
@@ -81,6 +84,7 @@ class TestMahoutClient(RMFTestCase):
         keytab = UnknownConfigurationMock(),
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
+        dfs_type = '',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
     )

+ 2 - 0
ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py

@@ -106,6 +106,7 @@ class TestSparkThriftServer(RMFTestCase):
         type = 'directory',
         action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         mode = 0775,
+        dfs_type = ''
     )
     self.assertResourceCalled('HdfsResource', None,
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -119,6 +120,7 @@ class TestSparkThriftServer(RMFTestCase):
         user = 'hdfs',
         action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+        dfs_type = ''
     )
     self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
         owner = 'spark',

+ 5 - 0
ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py

@@ -168,6 +168,7 @@ class TestAts(RMFTestCase):
                               type = 'directory',
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 0755,
+                              dfs_type = ''
                               )
     self.assertResourceCalled('HdfsResource', '/ats/done',
                               immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -185,6 +186,7 @@ class TestAts(RMFTestCase):
                               type = 'directory',
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 0700,
+                              dfs_type = ''
                               )
     self.assertResourceCalled('HdfsResource', '/ats',
                               immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -203,6 +205,7 @@ class TestAts(RMFTestCase):
                               type = 'directory',
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 0755,
+                              dfs_type = ''
                               )
     self.assertResourceCalled('HdfsResource', '/ats/active',
                               immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -220,6 +223,7 @@ class TestAts(RMFTestCase):
                               type = 'directory',
                               action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               mode = 01777,
+                              dfs_type = ''
                               )
     self.assertResourceCalled('HdfsResource', None,
                               immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
@@ -233,6 +237,7 @@ class TestAts(RMFTestCase):
                               user = 'hdfs',
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
+                              dfs_type = ''
                               )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
                               owner = 'yarn',
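
The remaining hunks repeat the pattern shown above. As a closing sketch, this is how a service script would plausibly forward the value into the resource; '' keeps the stock HDFS code path, while a stack backed by a non-HDFS filesystem would supply its own type so HDFS-specific steps can be skipped (names and owner below are assumed, not a verbatim excerpt from this commit):

    # Sketch only: params.HdfsResource is the partial defined in the service's params script.
    params.HdfsResource('/ats/done',
        type = 'directory',
        action = ['create_on_execute'],
        owner = 'yarn',          # assumed owner for illustration
        dfs_type = params.dfs_type,
    )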

Some files were not shown because too many files changed in this diff