
AMBARI-7202: Add functionality for Ambari Common Services

Jayush Luniya, 10 years ago
Parent commit: 14c869961a
50 changed files with 4,452 additions and 191 deletions
  1. ambari-server/conf/unix/ambari.properties (+1 -0)
  2. ambari-server/conf/windows/ambari.properties (+1 -0)
  3. ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java (+8 -2)
  4. ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java (+9 -0)
  5. ambari-server/src/main/java/org/apache/ambari/server/stack/BaseModule.java (+23 -5)
  6. ambari-server/src/main/java/org/apache/ambari/server/stack/ComponentModule.java (+1 -1)
  7. ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationModule.java (+1 -1)
  8. ambari-server/src/main/java/org/apache/ambari/server/stack/ModuleState.java (+29 -0)
  9. ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java (+76 -9)
  10. ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java (+14 -3)
  11. ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java (+143 -33)
  12. ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java (+133 -60)
  13. ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java (+1 -1)
  14. ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java (+1 -1)
  15. ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java (+1 -1)
  16. ambari-server/src/test/java/org/apache/ambari/server/stack/ComponentModuleTest.java (+1 -1)
  17. ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java (+1 -1)
  18. ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java (+167 -0)
  19. ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java (+91 -0)
  20. ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java (+8 -72)
  21. ambari-server/src/test/resources/common-services/HBASE/1.0/configuration/hbase-site.xml (+137 -0)
  22. ambari-server/src/test/resources/common-services/HBASE/1.0/metainfo.xml (+121 -0)
  23. ambari-server/src/test/resources/common-services/HDFS/1.0/configuration/hbase-site.xml (+137 -0)
  24. ambari-server/src/test/resources/common-services/HDFS/1.0/configuration/hdfs-site.xml (+396 -0)
  25. ambari-server/src/test/resources/common-services/HDFS/1.0/metainfo.xml (+133 -0)
  26. ambari-server/src/test/resources/common-services/HIVE/1.0/metainfo.xml (+127 -0)
  27. ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/hbase-site.xml (+137 -0)
  28. ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/hdfs-site.xml (+396 -0)
  29. ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml (+400 -0)
  30. ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/metainfo.xml (+89 -0)
  31. ambari-server/src/test/resources/common-services/PIG/1.0/configuration/pig.properties (+52 -0)
  32. ambari-server/src/test/resources/common-services/PIG/1.0/metainfo.xml (+61 -0)
  33. ambari-server/src/test/resources/common-services/ZOOKEEPER/1.0/metainfo.xml (+72 -0)
  34. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/metainfo.xml (+22 -0)
  35. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/repos/repoinfo.xml (+57 -0)
  36. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/HDFS/metainfo.xml (+46 -0)
  37. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/MAPREDUCE/metainfo.xml (+23 -0)
  38. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/PIG/metainfo.xml (+26 -0)
  39. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/metainfo.xml (+22 -0)
  40. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/repos/repoinfo.xml (+57 -0)
  41. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HBASE/metainfo.xml (+26 -0)
  42. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml (+145 -0)
  43. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml (+223 -0)
  44. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hbase-site.xml (+137 -0)
  45. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hdfs-log4j.xml (+199 -0)
  46. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hdfs-site.xml (+396 -0)
  47. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/metainfo.xml (+30 -0)
  48. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HIVE/metainfo.xml (+26 -0)
  49. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/MAPREDUCE/metainfo.xml (+23 -0)
  50. ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/ZOOKEEPER/metainfo.xml (+26 -0)

+ 1 - 0
ambari-server/conf/unix/ambari.properties

@@ -27,6 +27,7 @@ jce_policy1.6.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jce_policy-6.zi
 jdk1.7.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-7u67-linux-x64.tar.gz
 jce_policy1.7.url=http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip
 metadata.path=/var/lib/ambari-server/resources/stacks
+common.services.path=
 server.version.file=/var/lib/ambari-server/resources/version
 webapp.dir=/usr/lib/ambari-server/web
 bootstrap.dir=/var/run/ambari-server/bootstrap

+ 1 - 0
ambari-server/conf/windows/ambari.properties

@@ -32,6 +32,7 @@ jdk1.7.67.jcpol-file=UnlimitedJCEPolicyJDK7.zip
 jdk1.7.67.home=C:\\jdk1.7.0_67
 
 metadata.path=resources\\stacks
+common.services.path=
 server.version.file=version
 webapp.dir=web
 bootstrap.dir=bootstrap

+ 8 - 2
ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java

@@ -124,6 +124,7 @@ public class AmbariMetaInfo {
   private StackManager stackManager;
 
   private File stackRoot;
+  private File commonServicesRoot;
   private File serverVersionFile;
   private File customActionRoot;
 
@@ -173,16 +174,21 @@ public class AmbariMetaInfo {
   @Inject
   public AmbariMetaInfo(Configuration conf) throws Exception {
     String stackPath = conf.getMetadataPath();
+    String commonServicesPath = conf.getCommonServicesPath();
     String serverVersionFilePath = conf.getServerVersionFilePath();
     stackRoot = new File(stackPath);
+    if(commonServicesPath != null && !commonServicesPath.isEmpty()) {
+      commonServicesRoot = new File(commonServicesPath);
+    }
     serverVersionFile = new File(serverVersionFilePath);
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
     os_family = new OsFamily(conf);
     ALL_SUPPORTED_OS = new ArrayList<String>(os_family.os_list());
   }
 
-  public AmbariMetaInfo(File stackRoot, File serverVersionFile) throws Exception {
+  public AmbariMetaInfo(File stackRoot, File commonServicesRoot, File serverVersionFile) throws Exception {
     this.stackRoot = stackRoot;
+    this.commonServicesRoot = commonServicesRoot;
     this.serverVersionFile = serverVersionFile;
   }
 
@@ -196,7 +202,7 @@ public class AmbariMetaInfo {
     // Need to be initialized before all actions
     ALL_SUPPORTED_OS = new ArrayList<String>(os_family.os_list());
     readServerVersion();
-    stackManager = new StackManager(stackRoot,
+    stackManager = new StackManager(stackRoot,commonServicesRoot,
         new StackContext(metaInfoDAO, actionMetadata, os_family));
     getCustomActionDefinitions(customActionRoot);
   }

+ 9 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -98,6 +98,7 @@ public class Configuration {
   public static final String SRVR_DISABLED_PROTOCOLS = "security.server.disabled.protocols";
   public static final String RESOURCES_DIR_KEY = "resources.dir";
   public static final String METADETA_DIR_PATH = "metadata.path";
+  public static final String COMMON_SERVICES_DIR_PATH = "common.services.path";
   public static final String SERVER_VERSION_FILE = "server.version.file";
   public static final String SERVER_VERSION_KEY = "version";
   public static final String JAVA_HOME_KEY = "java.home";
@@ -679,6 +680,14 @@ public class Configuration {
     return properties.getProperty(METADETA_DIR_PATH);
   }
 
+  /**
+   * Gets ambari common services path
+   * @return String
+   */
+  public String getCommonServicesPath() {
+    return properties.getProperty(COMMON_SERVICES_DIR_PATH);
+  }
+
   public String getServerVersionFilePath() {
     return properties.getProperty(SERVER_VERSION_FILE);
   }
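
The new property is optional end to end: when common.services.path is unset or empty, getCommonServicesPath() returns null or "", the AmbariMetaInfo constructor leaves commonServicesRoot null, and StackManager skips common-services parsing. A minimal sketch of that guard, assuming plain java.util.Properties in place of Ambari's Configuration class:

import java.io.File;
import java.util.Properties;

public class CommonServicesPathDemo {
  public static void main(String[] args) {
    // stands in for a loaded ambari.properties; the key name is taken from this commit
    Properties props = new Properties();
    String path = props.getProperty("common.services.path");

    // same null/empty guard as the AmbariMetaInfo constructor above
    File commonServicesRoot = null;
    if (path != null && !path.isEmpty()) {
      commonServicesRoot = new File(path);
    }
    System.out.println("common services root: " + commonServicesRoot); // null => feature disabled
  }
}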

+ 23 - 5
ambari-server/src/main/java/org/apache/ambari/server/stack/BaseModule.java

@@ -31,28 +31,46 @@ import java.util.Set;
  * Provides functionality that is common across multiple modules.
  */
 public abstract class BaseModule<T, I> implements StackDefinitionModule<T, I> {
+
+  /**
+   * Module visitation state
+   */
+  protected ModuleState moduleState = ModuleState.INIT;
+
+  /**
+   * Module state.
+   * Initial state is INIT.
+   * When resolve is called state is set to VISITED.
+   * When resolve completes, state is set to RESOLVED.
+   *
+   * @return the module's state
+   */
+  public ModuleState getModuleState() {
+    return moduleState;
+  }
+
   /**
    * Merges child modules with the corresponding parent modules.
    *
    * @param allStacks      collection of all stack module in stack definition
+   * @param commonServices collection of all common service module in stack definition
    * @param modules        child modules of this module that are to be merged
    * @param parentModules  parent modules which the modules are to be merged with
    *
    * @return collection of the merged modules
    */
   protected <T extends StackDefinitionModule<T, ?>> Collection<T> mergeChildModules(
-      Map<String, StackModule> allStacks, Map<String, T> modules, Map<String, T> parentModules)
+      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, T> modules, Map<String, T> parentModules)
       throws AmbariException {
-
     Set<String> addedModules = new HashSet<String>();
     Collection<T> mergedModules = new HashSet<T>();
 
     for (T module : modules.values()) {
       String id = module.getId();
       addedModules.add(id);
-      if (! module.isDeleted()) {
+      if (!module.isDeleted()) {
         if (parentModules.containsKey(id)) {
-          module.resolve(parentModules.get(id), allStacks);
+          module.resolve(parentModules.get(id), allStacks, commonServices);
         }
         mergedModules.add(module);
       }
@@ -61,7 +79,7 @@ public abstract class BaseModule<T, I> implements StackDefinitionModule<T, I> {
     // add non-overlapping parent modules
     for (T parentModule : parentModules.values()) {
       String id = parentModule.getId();
-      if (! addedModules.contains(id)) {
+      if (!addedModules.contains(id)) {
         mergedModules.add(parentModule);
       }
     }
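
The reworked mergeChildModules keeps a simple override-or-inherit contract: a child module resolves against the parent module with the same id, a child flagged deleted suppresses both itself and its parent, and parent modules with no child counterpart are inherited unchanged. A self-contained sketch of that shape, assuming a toy Module record rather than Ambari's module types:

import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class MergeChildModulesDemo {
  record Module(String id, boolean deleted) {}

  static Collection<Module> merge(Map<String, Module> children, Map<String, Module> parents) {
    Set<String> seen = new HashSet<>();
    Collection<Module> merged = new HashSet<>();

    for (Module child : children.values()) {
      seen.add(child.id());                 // recorded even when deleted...
      if (!child.deleted()) {
        // in Ambari this is where child.resolve(parent, allStacks, commonServices) runs
        merged.add(child);
      }
    }
    for (Module parent : parents.values()) {
      if (!seen.contains(parent.id())) {    // ...so a deleted child also drops the parent
        merged.add(parent);
      }
    }
    return merged;
  }
}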

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/ComponentModule.java

@@ -47,7 +47,7 @@ public class ComponentModule extends BaseModule<ComponentModule, ComponentInfo>
   }
 
   @Override
-  public void resolve(ComponentModule parent, Map<String, StackModule> allStacks) {
+  public void resolve(ComponentModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) {
     ComponentInfo parentInfo = parent.getModuleInfo();
 
     if (componentInfo.getCommandScript() == null) {

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationModule.java

@@ -61,7 +61,7 @@ public class ConfigurationModule extends BaseModule<ConfigurationModule, Configu
   }
 
   @Override
-  public void resolve(ConfigurationModule parent, Map<String, StackModule> allStacks) throws AmbariException {
+  public void resolve(ConfigurationModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
     // merge properties also removes deleted props so should be called even if extension is disabled
     mergeProperties(parent);
 

+ 29 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/ModuleState.java

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+/**
+ * Module visitation state enum used for cycle detection
+ */
+public enum ModuleState
+{
+  INIT,
+  VISITED,
+  RESOLVED
+}
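
The three values give the resolvers a standard depth-first coloring: INIT is unvisited, VISITED means resolution is in progress somewhere up the call stack, and RESOLVED means fully merged. Meeting a VISITED module while walking parents can only mean the inheritance chain loops back on itself. A runnable sketch of the pattern with a toy Module class (not the Ambari modules):

import java.util.ArrayList;
import java.util.List;

class CycleDetectionDemo {
  enum ModuleState { INIT, VISITED, RESOLVED }

  static class Module {
    final String id;
    final List<Module> parents = new ArrayList<>();
    ModuleState state = ModuleState.INIT;

    Module(String id) { this.id = id; }

    void resolve() {
      state = ModuleState.VISITED;                       // gray: on the current path
      for (Module parent : parents) {
        if (parent.state == ModuleState.INIT) {
          parent.resolve();                              // recurse into unvisited parent
        } else if (parent.state == ModuleState.VISITED) {
          throw new IllegalStateException("Cycle detected at " + id);
        }                                                // RESOLVED: already merged, skip
      }
      state = ModuleState.RESOLVED;                      // black: done
    }
  }

  public static void main(String[] args) {
    Module a = new Module("A");
    Module b = new Module("B");
    a.parents.add(b);
    b.parents.add(a);                                    // A -> B -> A
    a.resolve();                                         // throws: cycle detected
  }
}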

+ 76 - 9
ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java

@@ -63,6 +63,10 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
    */
   private ServiceDirectory serviceDirectory;
 
+  /**
+   * Flag to mark a service as a common service
+   */
+  private boolean isCommonService;
 
   /**
    * Constructor.
@@ -72,9 +76,23 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
    * @param serviceDirectory  used for all IO interaction with service directory in stack definition
    */
   public ServiceModule(StackContext stackContext, ServiceInfo serviceInfo, ServiceDirectory serviceDirectory) {
+    this(stackContext, serviceInfo, serviceDirectory, false);
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param stackContext      stack context which provides module access to external functionality
+   * @param serviceInfo       associated service info
+   * @param serviceDirectory  used for all IO interaction with service directory in stack definition
+   * @param isCommonService   flag to mark a service as a common service
+   */
+  public ServiceModule(
+      StackContext stackContext, ServiceInfo serviceInfo, ServiceDirectory serviceDirectory, boolean isCommonService) {
     this.serviceInfo = serviceInfo;
     this.stackContext = stackContext;
     this.serviceDirectory = serviceDirectory;
+    this.isCommonService = isCommonService;
 
     serviceInfo.setMetricsFile(serviceDirectory.getMetricsFile());
     serviceInfo.setAlertsFile(serviceDirectory.getAlertsFile());
@@ -91,7 +109,9 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
   }
 
   @Override
-  public void resolve(ServiceModule parentModule, Map<String, StackModule> allStacks) throws AmbariException {
+  public void resolve(
+      ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     ServiceInfo parent = parentModule.getModuleInfo();
 
     if (serviceInfo.getComment() == null) {
@@ -135,8 +155,46 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
 
     mergeCustomCommands(parent.getCustomCommands(), serviceInfo.getCustomCommands());
     mergeConfigDependencies(parent);
-    mergeComponents(parentModule, allStacks);
-    mergeConfigurations(parentModule, allStacks);
+    mergeComponents(parentModule, allStacks, commonServices);
+    mergeConfigurations(parentModule, allStacks, commonServices);
+  }
+
+  /**
+   * Resolve common service
+   * @param allStacks       all stack modules
+   * @param commonServices  common service modules
+   *
+   * @throws AmbariException
+   */
+  public void resolveCommonService(Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
+    if(!isCommonService) {
+      throw new AmbariException("Not a common service");
+    }
+    moduleState = ModuleState.VISITED;
+    String parentString = serviceInfo.getParent();
+    if(parentString != null) {
+      String[] parentToks = parentString.split(StackManager.PATH_DELIMITER);
+      if(parentToks.length != 3) {
+        throw new AmbariException("The common service '" + serviceInfo.getName() + serviceInfo.getVersion()
+            + "' extends an invalid parent: '" + parentString + "'");
+      }
+      if (parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
+        String baseServiceKey = parentToks[1] + StackManager.PATH_DELIMITER + parentToks[2];
+        ServiceModule baseService = commonServices.get(baseServiceKey);
+        ModuleState baseModuleState = baseService.getModuleState();
+        if (baseModuleState == ModuleState.INIT) {
+          baseService.resolveCommonService(allStacks, commonServices);
+        } else if (baseModuleState == ModuleState.VISITED) {
+          //todo: provide more information to user about cycle
+          throw new AmbariException("Cycle detected while parsing common service");
+        }
+        resolve(baseService, allStacks, commonServices);
+      } else {
+        throw new AmbariException("Common service cannot inherit from a non common service");
+      }
+    }
+    moduleState = ModuleState.RESOLVED;
   }
 
   @Override
@@ -222,15 +280,18 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
    * Merge configurations with the parent configurations.
    * This will update the child configuration module set as well as the underlying info instances.
    *
-   * @param parent  parent service module
-   * @param stacks  all stack modules
+   * @param parent          parent service module
+   * @param allStacks       all stack modules
+   * @param commonServices  common service modules
    */
-  private void mergeConfigurations(ServiceModule parent, Map<String, StackModule> stacks) throws AmbariException {
+  private void mergeConfigurations(
+      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     serviceInfo.getProperties().clear();
     serviceInfo.setAllConfigAttributes(new HashMap<String, Map<String, Map<String, String>>>());
 
     Collection<ConfigurationModule> mergedModules = mergeChildModules(
-        stacks, configurationModules, parent.configurationModules);
+        allStacks, commonServices, configurationModules, parent.configurationModules);
 
     for (ConfigurationModule module : mergedModules) {
       configurationModules.put(module.getId(), module);
@@ -242,11 +303,17 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> {
   /**
    * Merge components with the parent configurations.
    * This will update the child component module set as well as the underlying info instances.
+   *
+   * @param parent          parent service module
+   * @param allStacks       all stack modules
+   * @param commonServices  common service modules
    */
-  private void mergeComponents(ServiceModule parent, Map<String, StackModule> stacks) throws AmbariException {
+  private void mergeComponents(
+      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     serviceInfo.getComponents().clear();
     Collection<ComponentModule> mergedModules = mergeChildModules(
-        stacks, componentModules, parent.componentModules);
+        allStacks, commonServices, componentModules, parent.componentModules);
 
     for (ComponentModule module : mergedModules) {
       componentModules.put(module.getId(), module);

+ 14 - 3
ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java

@@ -34,12 +34,13 @@ public interface StackDefinitionModule <T, I> {
   /**
    * Resolve the module state with the specified parent.
    *
-   * @param parent     the parent that this module will be merged with
-   * @param allStacks  collection of all stack modules in the tree
+   * @param parent          the parent that this module will be merged with
+   * @param allStacks       collection of all stack modules in the tree
+   * @param commonServices  collection of all common service modules in the tree
    *
    * @throws AmbariException if resolution fails
    */
-  public void resolve(T parent, Map<String, StackModule> allStacks) throws AmbariException;
+  public void resolve(T parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException;
 
   /**
    * Obtain the associated module information.
@@ -66,4 +67,14 @@ public interface StackDefinitionModule <T, I> {
    * Lifecycle even which is called when the associated stack has been fully resolved.
    */
   public void finalizeModule();
+
+  /**
+   * Module state.
+   * Initial state is INIT.
+   * When resolve is called state is set to VISITED.
+   * When resolve completes, state is set to RESOLVED.
+   *
+   * @return the module state
+   */
+  public ModuleState getModuleState();
 }

+ 143 - 33
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java

@@ -21,7 +21,9 @@ package org.apache.ambari.server.stack;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,6 +39,20 @@ import java.util.Map;
  * stack information.
  */
 public class StackManager {
+
+  /**
+   * Delimiter used for parent path string
+   * Example:
+   *  HDP/2.0.6/HDFS
+   *  common-services/HDFS/2.1.0.2.0
+   */
+  public static String PATH_DELIMITER = "/";
+
+  /**
+   * Prefix used for common services parent path string
+   */
+  public static final String COMMON_SERVICES = "common-services";
+
   /**
    * Provides access to non-stack server functionality
    */
@@ -48,48 +64,32 @@ public class StackManager {
   private final static Logger LOG = LoggerFactory.getLogger(StackManager.class);
 
   /**
-   * Map of stack name to stack info
+   * Map of stack id to stack info
    */
   private Map<String, StackInfo> stackMap = new HashMap<String, StackInfo>();
 
-
   /**
    * Constructor.
+   * Initialize stack manager.
    *
-   * @param stackRoot     stack root directory
-   * @param stackContext  context which provides external functionality
+   * @param stackRoot           stack root directory
+   * @param commonServicesRoot  common services root directory
+   * @param stackContext        context which provides external functionality
    *
    * @throws AmbariException if an exception occurs while processing the stacks
    */
-  public StackManager(File stackRoot, StackContext stackContext) throws AmbariException {
+  public StackManager(File stackRoot, File commonServicesRoot, StackContext stackContext) throws AmbariException {
     validateStackDirectory(stackRoot);
+    validateCommonServicesDirectory(commonServicesRoot);
 
+    this.stackMap = new HashMap<String, StackInfo>();
     this.stackContext = stackContext;
-    Map<String, StackModule> stackModules = new HashMap<String, StackModule>();
-    File[] stackFiles = stackRoot.listFiles(AmbariMetaInfo.FILENAME_FILTER);
-    for (File stack : stackFiles) {
-      if (stack.isFile()) {
-        continue;
-      }
-      for (File stackFolder : stack.listFiles(AmbariMetaInfo.FILENAME_FILTER)) {
-        if (stackFolder.isFile()) {
-          continue;
-        }
-        String stackName = stackFolder.getParentFile().getName();
-        String stackVersion = stackFolder.getName();
 
-        StackModule stackModule = new StackModule(new StackDirectory(stackFolder.getPath()),stackContext);
-        stackModules.put(stackName + stackVersion, stackModule);
-        stackMap.put(stackName + stackVersion, stackModule.getModuleInfo());
-      }
-    }
-
-    if (stackMap.isEmpty()) {
-      throw new AmbariException("Unable to find stack definitions under " +
-          "stackRoot = " + stackRoot.getAbsolutePath());
-    }
+    Map<String, ServiceModule> commonServiceModules = parseCommonServicesDirectory(commonServicesRoot);
+    Map<String, StackModule> stackModules = parseStackDirectory(stackRoot);
 
-    fullyResolveStacks(stackModules);
+    fullyResolveCommonServices(stackModules, commonServiceModules);
+    fullyResolveStacks(stackModules, commonServiceModules);
   }
 
   /**
@@ -101,7 +101,7 @@ public class StackManager {
    *         If no matching stack exists, null is returned.
    */
   public StackInfo getStack(String name, String version) {
-    return stackMap.get(name + version);
+    return stackMap.get(name + StackManager.PATH_DELIMITER + version);
   }
 
   /**
@@ -142,21 +142,63 @@ public class StackManager {
   /**
    * Fully resolve all stacks.
    *
-   * @param stackModules  map of stack id which contains name and version to stack module.
+   * @param stackModules          map of stack id which contains name and version to stack module.
+   * @param commonServiceModules  map of common service id which contains name and version to stack module.
    * @throws AmbariException if unable to resolve all stacks
    */
-  private void fullyResolveStacks(Map<String, StackModule> stackModules) throws AmbariException {
+  private void fullyResolveStacks(
+      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules)
+      throws AmbariException {
     for (StackModule stack : stackModules.values()) {
-      if (stack.getResolutionState() == StackModule.State.INIT) {
-        stack.resolve(null, stackModules);
+      if (stack.getModuleState() == ModuleState.INIT) {
+        stack.resolve(null, stackModules, commonServiceModules);
       }
     }
     // execute all of the repo tasks in a single thread executor
     stackContext.executeRepoTasks();
   }
 
+  /**
+   * Fully resolve common services.
+   *
+   * @param stackModules          map of stack id which contains name and version to stack module.
+   * @param commonServiceModules  map of common service id which contains name and version to common service module.
+   * @throws AmbariException if unable to resolve all common services
+   */
+  private void fullyResolveCommonServices(
+      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules)
+      throws AmbariException {
+    for(ServiceModule commonService : commonServiceModules.values()) {
+      if (commonService.getModuleState() == ModuleState.INIT) {
+        commonService.resolveCommonService(stackModules, commonServiceModules);
+      }
+    }
+  }
+
+  /**
+   * Validate that the specified common services root is a valid directory.
+   *
+   * @param commonServicesRoot the common services root directory to validate
+   * @throws AmbariException if the specified common services root directory is invalid
+   */
+  private void validateCommonServicesDirectory(File commonServicesRoot) throws AmbariException {
+    if(commonServicesRoot != null) {
+      String commonServicesRootAbsolutePath = commonServicesRoot.getAbsolutePath();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loading common services information"
+            + ", commonServicesRoot = " + commonServicesRootAbsolutePath);
+      }
+
+      if (!commonServicesRoot.isDirectory() && !commonServicesRoot.exists())
+        throw new AmbariException("" + Configuration.COMMON_SERVICES_DIR_PATH
+            + " should be a directory with common services"
+            + ", commonServicesRoot = " + commonServicesRootAbsolutePath);
+    }
+  }
+
   /**
    * Validate that the specified stack root is a valid directory.
+   *
    * @param stackRoot  the stack root directory to validate
    * @throws AmbariException if the specified stack root directory is invalid
    */
@@ -172,4 +214,72 @@ public class StackManager {
           + " should be a directory with stack"
           + " should be a directory with stack"
           + ", stackRoot = " + stackRootAbsPath);
           + ", stackRoot = " + stackRootAbsPath);
   }
   }
+
+  /**
+   * Parse the specified common services root directory
+   *
+   * @param commonServicesRoot  the common services root directory to parse
+   * @return map of common service id which contains name and version to common service module.
+   * @throws AmbariException if unable to parse all common services
+   */
+  private Map<String, ServiceModule> parseCommonServicesDirectory(File commonServicesRoot) throws AmbariException {
+    Map<String, ServiceModule> commonServiceModules = new HashMap<String, ServiceModule>();
+
+    if(commonServicesRoot != null) {
+      File[] commonServiceFiles = commonServicesRoot.listFiles(AmbariMetaInfo.FILENAME_FILTER);
+      for (File commonService : commonServiceFiles) {
+        if (commonService.isFile()) {
+          continue;
+        }
+        for (File serviceFolder : commonService.listFiles(AmbariMetaInfo.FILENAME_FILTER)) {
+          String serviceName = serviceFolder.getParentFile().getName();
+          String serviceVersion = serviceFolder.getName();
+          ServiceDirectory serviceDirectory = new ServiceDirectory(serviceFolder.getPath());
+          ServiceMetainfoXml metaInfoXml = serviceDirectory.getMetaInfoFile();
+          for (ServiceInfo serviceInfo : metaInfoXml.getServices()) {
+            ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory, true);
+            String commonServiceKey = serviceName + StackManager.PATH_DELIMITER + serviceVersion;
+            commonServiceModules.put(commonServiceKey, serviceModule);
+          }
+        }
+      }
+    }
+    return commonServiceModules;
+  }
+
+  /**
+   * Parse the specified stack root directory
+   *
+   * @param stackRoot  the stack root directory to parse
+   * @return map of stack id which contains name and version to stack module.
+   * @throws AmbariException if unable to parse all stacks
+   */
+  private Map<String, StackModule> parseStackDirectory(File stackRoot) throws AmbariException {
+    Map<String, StackModule> stackModules = new HashMap<String, StackModule>();
+
+    File[] stackFiles = stackRoot.listFiles(AmbariMetaInfo.FILENAME_FILTER);
+    for (File stack : stackFiles) {
+      if (stack.isFile()) {
+        continue;
+      }
+      for (File stackFolder : stack.listFiles(AmbariMetaInfo.FILENAME_FILTER)) {
+        if (stackFolder.isFile()) {
+          continue;
+        }
+        String stackName = stackFolder.getParentFile().getName();
+        String stackVersion = stackFolder.getName();
+
+        StackModule stackModule = new StackModule(new StackDirectory(stackFolder.getPath()), stackContext);
+        String stackKey = stackName + StackManager.PATH_DELIMITER + stackVersion;
+        stackModules.put(stackKey, stackModule);
+        stackMap.put(stackKey, stackModule.getModuleInfo());
+      }
+    }
+
+    if (stackMap.isEmpty()) {
+      throw new AmbariException("Unable to find stack definitions under " +
+          "stackRoot = " + stackRoot.getAbsolutePath());
+    }
+    return stackModules;
+  }
 }
 }
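
With both maps keyed on the same '/' delimiter as the parent strings, resolving an explicit parent reduces to tokenizing and re-joining: 'common-services/HDFS/2.1.0.2.0' keys into the common-services map as 'HDFS/2.1.0.2.0', while 'HDP/2.0.6/HDFS' keys into the stack map as 'HDP/2.0.6'. A hedged sketch of that routing; the helper class is illustrative, not part of this commit:

public class ParentKeyDemo {
  static final String DELIM = "/";
  static final String COMMON_SERVICES = "common-services";

  // Derives the map lookup key for an explicit parent declaration.
  static String lookupKey(String parent) {
    String[] toks = parent.split(DELIM);
    if (toks.length != 3) {
      throw new IllegalArgumentException("invalid parent: " + parent);
    }
    return toks[0].equalsIgnoreCase(COMMON_SERVICES)
        ? toks[1] + DELIM + toks[2]   // common-services/<name>/<version> -> <name>/<version>
        : toks[0] + DELIM + toks[1];  // <stack>/<version>/<service>      -> <stack>/<version>
  }

  public static void main(String[] args) {
    System.out.println(lookupKey("common-services/HDFS/2.1.0.2.0")); // HDFS/2.1.0.2.0
    System.out.println(lookupKey("HDP/2.0.6/HDFS"));                 // HDP/2.0.6
  }
}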

+ 133 - 60
ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java

@@ -71,15 +71,6 @@ import java.util.Map;
  *
  */
 public class StackModule extends BaseModule<StackModule, StackInfo> {
-  /**
-   * Visitation state enum used for cycle detection
-   */
-  public enum State { INIT, VISITED, RESOLVED }
-
-  /**
-   * Visitation state of the stack
-   */
-  private State resolutionState = State.INIT;
 
   /**
    * Context which provides access to external functionality
@@ -136,23 +127,26 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
    * same stack hierarchy or may explicitly extend a service in a stack in a different
    * hierarchy.
    *
-   * @param parentModule  not used.  Each stack determines its own parent since stacks don't
-   *                      have containing modules
-   * @param allStacks     all stacks modules contained in the stack definition
+   * @param parentModule   not used.  Each stack determines its own parent since stacks don't
+   *                       have containing modules
+   * @param allStacks      all stacks modules contained in the stack definition
+   * @param commonServices all common services specified in the stack definition
    *
    * @throws AmbariException if an exception occurs during stack resolution
    */
   @Override
-  public void resolve(StackModule parentModule, Map<String, StackModule> allStacks) throws AmbariException {
-    resolutionState = State.VISITED;
+  public void resolve(
+      StackModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
+    moduleState = ModuleState.VISITED;
     String parentVersion = stackInfo.getParentStackVersion();
     // merge with parent version of same stack definition
     if (parentVersion != null) {
-      mergeStackWithParent(allStacks, parentVersion);
+      mergeStackWithParent(parentVersion, allStacks, commonServices);
     }
-    mergeServicesWithExplicitParent(allStacks);
+    mergeServicesWithExplicitParent(allStacks, commonServices);
     processRepositories();
-    resolutionState = State.RESOLVED;
+    moduleState = ModuleState.RESOLVED;
 
     finalizeModule();
   }
@@ -187,28 +181,20 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
     return stackDirectory;
   }
 
-  /**
-   * Stack resolution state.
-   * Initial state is INIT.
-   * When resolve is called state is set to VISITED.
-   * When resolve completes, state is set to RESOLVED.
-   *
-   * @return the stacks resolution state
-   */
-  public State getResolutionState() {
-    return resolutionState;
-  }
-
   /**
    * Merge the stack with its parent.
    *
    * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
    * @param parentVersion  version of the stacks parent
    *
    * @throws AmbariException if an exception occurs merging with the parent
    */
-  private void mergeStackWithParent(Map<String, StackModule> allStacks, String parentVersion) throws AmbariException {
-    String parentStackKey = stackInfo.getName() + parentVersion;
+  private void mergeStackWithParent(
+      String parentVersion, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
+
+    String parentStackKey = stackInfo.getName() + StackManager.PATH_DELIMITER + parentVersion;
     StackModule parentStack = allStacks.get(parentStackKey);
 
     if (parentStack == null) {
@@ -216,27 +202,31 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
           "' specifies a parent that doesn't exist");
           "' specifies a parent that doesn't exist");
     }
     }
 
 
-    resolveStack(parentStack, allStacks);
-    mergeConfigurations(parentStack, allStacks);
+    resolveStack(parentStack, allStacks, commonServices);
+    mergeConfigurations(parentStack, allStacks, commonServices);
     mergeRoleCommandOrder(parentStack);
     mergeRoleCommandOrder(parentStack);
 
 
     if (stackInfo.getStackHooksFolder() == null) {
     if (stackInfo.getStackHooksFolder() == null) {
       stackInfo.setStackHooksFolder(parentStack.getModuleInfo().getStackHooksFolder());
       stackInfo.setStackHooksFolder(parentStack.getModuleInfo().getStackHooksFolder());
     }
     }
-    mergeServicesWithParent(allStacks, parentStack);
+    mergeServicesWithParent(parentStack, allStacks, commonServices);
   }
   }
 
 
   /**
   /**
    * Merge child services with parent stack.
    * Merge child services with parent stack.
    *
    *
-   * @param stacks       all stacks in stack definition
-   * @param parentStack  parent stack module
+   * @param parentStack    parent stack module
+   * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
    *
    *
    * @throws AmbariException if an exception occurs merging the child services with the parent stack
    * @throws AmbariException if an exception occurs merging the child services with the parent stack
    */
    */
-  private void mergeServicesWithParent(Map<String, StackModule> stacks, StackModule parentStack) throws AmbariException {
+  private void mergeServicesWithParent(
+      StackModule parentStack, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     stackInfo.getServices().clear();
     stackInfo.getServices().clear();
-    Collection<ServiceModule> mergedModules = mergeChildModules(stacks, serviceModules, parentStack.serviceModules);
+    Collection<ServiceModule> mergedModules = mergeChildModules(
+        allStacks, commonServices, serviceModules, parentStack.serviceModules);
     for (ServiceModule module : mergedModules) {
     for (ServiceModule module : mergedModules) {
       serviceModules.put(module.getId(), module);
       serviceModules.put(module.getId(), module);
       stackInfo.getServices().add(module.getModuleInfo());
       stackInfo.getServices().add(module.getModuleInfo());
@@ -245,52 +235,129 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
 
 
   /**
   /**
    * Merge services with their explicitly specified parent if one has been specified.
    * Merge services with their explicitly specified parent if one has been specified.
-   *
-   * @param stacks  all stacks specified in the stack definition
+   * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
    *
    *
    * @throws AmbariException if an exception occurs while merging child services with their explicit parents
    * @throws AmbariException if an exception occurs while merging child services with their explicit parents
    */
    */
-  private void mergeServicesWithExplicitParent(Map<String, StackModule> stacks) throws AmbariException {
+  private void mergeServicesWithExplicitParent(
+      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
     for (ServiceModule service : serviceModules.values()) {
     for (ServiceModule service : serviceModules.values()) {
       ServiceInfo serviceInfo = service.getModuleInfo();
       ServiceInfo serviceInfo = service.getModuleInfo();
       String parent = serviceInfo.getParent();
       String parent = serviceInfo.getParent();
       if (parent != null) {
       if (parent != null) {
-        mergeServiceWithExplicitParent(stacks, service, parent);
+        mergeServiceWithExplicitParent(service, parent, allStacks, commonServices);
       }
       }
     }
     }
   }
   }
 
 
   /**
   /**
    * Merge a service with its explicitly specified parent.
    * Merge a service with its explicitly specified parent.
-   * @param stacks   all stacks specified in the stack definition
-   * @param service  the service to merge
-   * @param parent   the explicitly specified parent service
+   * @param service          the service to merge
+   * @param parent           the explicitly specified parent service
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services specified in the stack definition
    *
    *
    * @throws AmbariException if an exception occurs merging a service with its explicit parent
    * @throws AmbariException if an exception occurs merging a service with its explicit parent
    */
    */
-  private void mergeServiceWithExplicitParent(Map<String, StackModule> stacks, ServiceModule service, String parent)
+  private void mergeServiceWithExplicitParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices)
+      throws AmbariException {
+    if(isCommonServiceParent(parent)) {
+      mergeServiceWithCommonServiceParent(service, parent, allStacks,commonServices);
+    } else {
+      mergeServiceWithStackServiceParent(service, parent, allStacks, commonServices);
+    }
+  }
+
+  /**
+   * Check if parent is common service
+   * @param parent  Parent string
+   * @return true: if parent is common service, false otherwise
+   */
+  private boolean isCommonServiceParent(String parent) {
+    return parent != null
+        && !parent.isEmpty()
+        && parent.split(StackManager.PATH_DELIMITER)[0].equalsIgnoreCase(StackManager.COMMON_SERVICES);
+  }
+
+  /**
+   * Merge a service with its explicitly specified common service as parent.
+   * Parent: common-services/<serviceName>/<serviceVersion>
+   * Common Services Lookup Key: <serviceName>/<serviceVersion>
+   * Example:
+   *  Parent: common-services/HDFS/2.1.0.2.0
+   *  Key: HDFS/2.1.0.2.0
+   *
+   * @param service          the service to merge
+   * @param parent           the explicitly specified common service as parent
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services specified in the stack definition
+   * @throws AmbariException
+   */
+  private void mergeServiceWithCommonServiceParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices)
       throws AmbariException {
       throws AmbariException {
+    ServiceInfo serviceInfo = service.getModuleInfo();
+    String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
+    if(parentToks.length != 3 || !parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
+      throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+          + stackInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
+    }
+
+    String baseServiceKey = parentToks[1] + StackManager.PATH_DELIMITER + parentToks[2];
+    ServiceModule baseService = commonServices.get(baseServiceKey);
+    if (baseService == null) {
+      throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+          + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
+    }
+    service.resolve(baseService, allStacks, commonServices);
+  }
 
 
+  /**
+   * Merge a service with its explicitly specified stack service as parent.
+   * Parent: <stackName>/<stackVersion>/<serviceName>
+   * Stack Lookup Key: <stackName>/<stackVersion>
+   * Example:
+   *  Parent: HDP/2.0.6/HDFS
+   *  Key: HDP/2.0.6
+   *
+   * @param service          the service to merge
+   * @param parent           the explicitly specified stack service as parent
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services specified in the stack definition
+   * @throws AmbariException
+   */
+  private void mergeServiceWithStackServiceParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     ServiceInfo serviceInfo = service.getModuleInfo();
     ServiceInfo serviceInfo = service.getModuleInfo();
-    String[] parentToks = parent.split("/");
-    String baseStackKey = parentToks[0] + parentToks[1];
-    StackModule baseStack = stacks.get(baseStackKey);
+    String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
+    if(parentToks.length != 3 || parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
+      throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+          + stackInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
+    }
+
+    String baseStackKey = parentToks[0] + StackManager.PATH_DELIMITER + parentToks[1];
+    StackModule baseStack = allStacks.get(baseStackKey);
     if (baseStack == null) {
     if (baseStack == null) {
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
           + stackInfo.getVersion() + "' extends a service in a non-existent stack: '" + baseStackKey + "'");
           + stackInfo.getVersion() + "' extends a service in a non-existent stack: '" + baseStackKey + "'");
     }
     }
 
 
-    resolveStack(baseStack, stacks);
+    resolveStack(baseStack, allStacks, commonServices);
 
 
     ServiceModule baseService = baseStack.serviceModules.get(parentToks[2]);
     ServiceModule baseService = baseStack.serviceModules.get(parentToks[2]);
     if (baseService == null) {
     if (baseService == null) {
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
           + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
           + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
     }
     }
-    service.resolve(baseService, stacks);
+    service.resolve(baseService, allStacks, commonServices);
   }
   }
 
 
-
   /**
   /**
    * Populate the stack module and info from the stack definition.
    * Populate the stack module and info from the stack definition.
    */
    */
@@ -378,14 +445,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
    * Merge configurations with the parent configurations.
    *
    * @param parent  parent stack module
-   * @param stacks  all stack modules
+   * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
    */
-  private void mergeConfigurations(StackModule parent, Map<String, StackModule> stacks) throws AmbariException {
+  private void mergeConfigurations(
+      StackModule parent, Map<String,StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
     stackInfo.getProperties().clear();
     stackInfo.setAllConfigAttributes(new HashMap<String, Map<String, Map<String, String>>>());
 
     Collection<ConfigurationModule> mergedModules = mergeChildModules(
-        stacks, configurationModules, parent.configurationModules);
+        allStacks, commonServices, configurationModules, parent.configurationModules);
     for (ConfigurationModule module : mergedModules) {
       configurationModules.put(module.getId(), module);
       stackInfo.getProperties().addAll(module.getModuleInfo().getProperties());
@@ -397,13 +467,16 @@ public class StackModule extends BaseModule<StackModule, StackInfo> {
    * Resolve another stack module.
    * Resolve another stack module.
    *
    *
    * @param stackToBeResolved  stack module to be resolved
    * @param stackToBeResolved  stack module to be resolved
-   * @param stacks             all stack modules in stack definition
+   * @param allStacks          all stack modules in stack definition
+   * @param commonServices     all common services specified in the stack definition
    * @throws AmbariException if unable to resolve the stack
    * @throws AmbariException if unable to resolve the stack
    */
    */
-  private void resolveStack(StackModule stackToBeResolved, Map<String, StackModule> stacks) throws AmbariException {
-    if (stackToBeResolved.getResolutionState() == State.INIT) {
-      stackToBeResolved.resolve(null, stacks);
-    } else if (stackToBeResolved.getResolutionState() == State.VISITED) {
+  private void resolveStack(
+      StackModule stackToBeResolved, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      throws AmbariException {
+    if (stackToBeResolved.getModuleState() == ModuleState.INIT) {
+      stackToBeResolved.resolve(null, allStacks, commonServices);
+    } else if (stackToBeResolved.getModuleState() == ModuleState.VISITED) {
       //todo: provide more information to user about cycle
       throw new AmbariException("Cycle detected while parsing stack definition");
     }

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -1735,7 +1735,7 @@ public class AmbariMetaInfoTest {
     OsFamily osFamily;

     public TestAmbariMetaInfo(File stackRoot, File serverVersionFile) throws Exception {
-      super(stackRoot, serverVersionFile);
+      super(stackRoot, null, serverVersionFile);
       // MetainfoDAO
       metaInfoDAO = createNiceMock(MetainfoDAO.class);
       Class<?> c = getClass().getSuperclass();

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java

@@ -217,7 +217,7 @@ public class KerberosServiceMetaInfoTest {
     OsFamily osFamily;

     public TestAmbariMetaInfo(File stackRoot, File serverVersionFile) throws Exception {
-      super(stackRoot, serverVersionFile);
+      super(stackRoot, null, serverVersionFile);
       // MetainfoDAO
       metaInfoDAO = createNiceMock(MetainfoDAO.class);
       Class<?> c = getClass().getSuperclass();

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -8087,7 +8087,7 @@ public class AmbariManagementControllerTest {
     assertEquals(original, repo.getDefaultBaseUrl());

     // verify change with new meta info
-    AmbariMetaInfo ami = new AmbariMetaInfo(new File("src/test/resources/stacks"), new File("target/version"));
+    AmbariMetaInfo ami = new AmbariMetaInfo(new File("src/test/resources/stacks"), null, new File("target/version"));
     injector.injectMembers(ami);
     ami.init();
 
 

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/stack/ComponentModuleTest.java

@@ -394,7 +394,7 @@ public class ComponentModuleTest {
     ComponentModule component = new ComponentModule(info);
     ComponentModule parentComponent = new ComponentModule(parentInfo);

-    component.resolve(parentComponent, Collections.<String, StackModule>emptyMap());
+    component.resolve(parentComponent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap());

     return component;
   }

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java

@@ -976,7 +976,7 @@ public class ServiceModuleTest {
   }

   private void resolveService(ServiceModule service, ServiceModule parent) throws AmbariException {
-    service.resolve(parent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap());
     // during runtime this would be called by the Stack module when it's resolve completed
     service.finalizeModule();
     parent.finalizeModule();

+ 167 - 0
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java

@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.*;
+
+import static org.easymock.EasyMock.*;
+import static org.junit.Assert.*;
+
+/**
+ * StackManager unit tests for stacks that use common services.
+ */
+public class StackManagerCommonServicesTest {
+
+  private static StackManager stackManager;
+  private static MetainfoDAO dao;
+  private static ActionMetadata actionMetadata;
+  private static OsFamily osFamily;
+
+  @BeforeClass
+  public static void initStack() throws Exception{
+    stackManager = createTestStackManager();
+  }
+
+  public static StackManager createTestStackManager() throws Exception {
+    String stack = ClassLoader.getSystemClassLoader().getResource("stacks_with_common_services").getPath();
+    String commonServices = ClassLoader.getSystemClassLoader().getResource("common-services").getPath();
+    return createTestStackManager(stack, commonServices);
+  }
+
+  public static StackManager createTestStackManager(String stackRoot, String commonServicesRoot) throws Exception {
+    try {
+      //todo: dao , actionMetaData expectations
+      dao = createNiceMock(MetainfoDAO.class);
+      actionMetadata = createNiceMock(ActionMetadata.class);
+      Configuration config = createNiceMock(Configuration.class);
+      expect(config.getSharedResourcesDirPath()).andReturn(
+          ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
+      replay(config);
+      osFamily = new OsFamily(config);
+
+      replay(dao, actionMetadata);
+      StackManager stackManager = new StackManager(
+          new File(stackRoot), new File(commonServicesRoot), new StackContext(dao, actionMetadata, osFamily));
+      return stackManager;
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  @Test
+  public void testGetStacks_count() throws Exception {
+    Collection<StackInfo> stacks = stackManager.getStacks();
+    assertEquals(2, stacks.size());
+  }
+
+  @Test
+  public void testGetStack_name__count() {
+    Collection<StackInfo> stacks = stackManager.getStacks("HDP");
+    assertEquals(2, stacks.size());
+  }
+
+  @Test
+  public void testGetStack_basic() {
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
+    assertNotNull(stack);
+    assertEquals("HDP", stack.getName());
+    assertEquals("0.1", stack.getVersion());
+
+
+    Collection<ServiceInfo> services = stack.getServices();
+    assertEquals(3, services.size());
+
+    Map<String, ServiceInfo> serviceMap = new HashMap<String, ServiceInfo>();
+    for (ServiceInfo service : services) {
+      serviceMap.put(service.getName(), service);
+    }
+    ServiceInfo hdfsService = serviceMap.get("HDFS");
+    assertNotNull(hdfsService);
+    List<ComponentInfo> components = hdfsService.getComponents();
+    assertEquals(6, components.size());
+    List<PropertyInfo> properties = hdfsService.getProperties();
+    assertEquals(62, properties.size());
+
+    // test a couple of the properties for filename
+    boolean hdfsPropFound = false;
+    boolean hbasePropFound = false;
+    for (PropertyInfo p : properties) {
+      if (p.getName().equals("hbase.regionserver.msginterval")) {
+        assertEquals("hbase-site.xml", p.getFilename());
+        hbasePropFound = true;
+      } else if (p.getName().equals("dfs.name.dir")) {
+        assertEquals("hdfs-site.xml", p.getFilename());
+        hdfsPropFound = true;
+      }
+    }
+    assertTrue(hbasePropFound);
+    assertTrue(hdfsPropFound);
+
+    ServiceInfo mrService = serviceMap.get("MAPREDUCE");
+    assertNotNull(mrService);
+    components = mrService.getComponents();
+    assertEquals(3, components.size());
+
+    ServiceInfo pigService = serviceMap.get("PIG");
+    assertNotNull(pigService);
+    assertEquals("PIG", pigService.getName());
+    assertEquals("1.0", pigService.getVersion());
+    assertEquals("This is comment for PIG service", pigService.getComment());
+    components = pigService.getComponents();
+    assertEquals(1, components.size());
+    CommandScriptDefinition commandScript = pigService.getCommandScript();
+    assertEquals("scripts/service_check.py", commandScript.getScript());
+    assertEquals(CommandScriptDefinition.Type.PYTHON, commandScript.getScriptType());
+    assertEquals(300, commandScript.getTimeout());
+    List<String> configDependencies = pigService.getConfigDependencies();
+    assertEquals(1, configDependencies.size());
+    assertEquals("global", configDependencies.get(0));
+    assertEquals("global", pigService.getConfigDependenciesWithComponents().get(0));
+    ComponentInfo client = pigService.getClientComponent();
+    assertNotNull(client);
+    assertEquals("PIG", client.getName());
+    assertEquals("0+", client.getCardinality());
+    assertEquals("CLIENT", client.getCategory());
+    assertEquals("configuration", pigService.getConfigDir());
+    assertEquals("2.0", pigService.getSchemaVersion());
+    Map<String, ServiceOsSpecific> osInfoMap = pigService.getOsSpecifics();
+    assertEquals(1, osInfoMap.size());
+    ServiceOsSpecific osSpecific = osInfoMap.get("centos6");
+    assertNotNull(osSpecific);
+    assertEquals("centos6", osSpecific.getOsFamily());
+    assertNull(osSpecific.getRepo());
+    List<ServiceOsSpecific.Package> packages = osSpecific.getPackages();
+    assertEquals(1, packages.size());
+    ServiceOsSpecific.Package pkg = packages.get(0);
+    assertEquals("pig", pkg.getName());
+
+    assertEquals(pigService.getParent(), "common-services/PIG/1.0");
+  }
+}
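
The final assertion pins down the parent-path format introduced for common services: common-services/<serviceName>/<version>, as opposed to the <stackName>/<stackVersion>/<serviceName> form used when a service's declared parent lives in another stack (the parentToks[2] lookup in StackModule). A hypothetical helper to tell the two apart, assuming StackManager.PATH_DELIMITER is "/":

  // Hypothetical, not part of the patch: classify a service's parent
  // reference by its leading path token.
  static boolean isCommonServiceParent(String parent) {
    String[] toks = parent.split(StackManager.PATH_DELIMITER);
    return toks.length == 3 && "common-services".equals(toks[0]);
  }
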

+ 91 - 0
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java

@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Collection;
+
+import static org.easymock.EasyMock.*;
+import static org.junit.Assert.*;
+
+/**
+ * StackManager Misc unit tests.
+ */
+public class StackManagerMiscTest  {
+
+  @Test
+  public void testCycleDetection() throws Exception {
+    MetainfoDAO dao = createNiceMock(MetainfoDAO.class);
+    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
+    OsFamily osFamily = createNiceMock(OsFamily.class);
+    replay(actionMetadata);
+    try {
+      String stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
+      StackManager stackManager = new StackManager(new File(stacksCycle1), null,
+          new StackContext(dao, actionMetadata, osFamily));
+      fail("Expected exception due to cyclic stack");
+    } catch (AmbariException e) {
+      // expected
+      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
+    }
+    try {
+      String stacksCycle2 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle2").getPath();
+      StackManager stackManager = new StackManager(new File(stacksCycle2), null,
+          new StackContext(dao, actionMetadata, osFamily));
+      fail("Expected exception due to cyclic stack");
+    } catch (AmbariException e) {
+      // expected
+      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
+    }
+  }
+
+  /**
+   * This test ensures the service status check is added into the action metadata when
+   * the stack has no parent and is the only stack in the stack family
+   */
+  @Test
+  public void testGetServiceInfoFromSingleStack() throws Exception {
+    MetainfoDAO dao = createNiceMock(MetainfoDAO.class);
+    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
+    OsFamily  osFamily = createNiceMock(OsFamily.class);
+
+    // ensure that service check is added for HDFS
+    actionMetadata.addServiceCheckAction("HDFS");
+    replay(dao, actionMetadata, osFamily);
+    String singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
+
+    StackManager stackManager = new StackManager(
+        new File(singleStack.replace(StackManager.PATH_DELIMITER, File.separator)),
+        null,
+        new StackContext(dao, actionMetadata, osFamily));
+
+    Collection<StackInfo> stacks = stackManager.getStacks();
+    assertEquals(1, stacks.size());
+    assertNotNull(stacks.iterator().next().getService("HDFS"));
+
+    verify(dao, actionMetadata, osFamily);
+  }
+}

+ 8 - 72
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java

@@ -63,10 +63,7 @@ public class StackManagerTest {
   }

   public static StackManager createTestStackManager() throws Exception {
-    String stack = "./src/test/resources/stacks/";
-    if (System.getProperty("os.name").contains("Windows")) {
-      stack = ClassLoader.getSystemClassLoader().getResource("stacks").getPath();
-    }
+    String stack = ClassLoader.getSystemClassLoader().getResource("stacks").getPath();
     return createTestStackManager(stack);
   }

@@ -76,17 +73,15 @@ public class StackManagerTest {
       dao = createNiceMock(MetainfoDAO.class);
       actionMetadata = createNiceMock(ActionMetadata.class);
       Configuration config = createNiceMock(Configuration.class);
-      if (System.getProperty("os.name").contains("Windows")) {
-        expect(config.getSharedResourcesDirPath()).andReturn(ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
-      }
-      else {
-        expect(config.getSharedResourcesDirPath()).andReturn("./src/test/resources").anyTimes();
-      }
+      expect(config.getSharedResourcesDirPath()).andReturn(
+          ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
       replay(config);
       osFamily = new OsFamily(config);

       replay(dao, actionMetadata);
-      return new StackManager(new File(stackRoot), new StackContext(dao, actionMetadata, osFamily));
+      StackManager stackManager = new StackManager(
+          new File(stackRoot), null, new StackContext(dao, actionMetadata, osFamily));
+      return stackManager;
     } catch (Exception e) {
       e.printStackTrace();
       throw e;
@@ -154,7 +149,6 @@ public class StackManagerTest {
     assertNotNull(pigService);
     assertEquals("PIG", pigService.getName());
     assertEquals("1.0", pigService.getVersion());
-    assertNull(pigService.getParent());
     assertEquals("This is comment for PIG service", pigService.getComment());
     components = pigService.getComponents();
     assertEquals(1, components.size());
@@ -183,6 +177,8 @@ public class StackManagerTest {
     assertEquals(1, packages.size());
     ServiceOsSpecific.Package pkg = packages.get(0);
     assertEquals("pig", pkg.getName());
+
+    assertNull(pigService.getParent());
   }

   @Test
@@ -518,38 +514,6 @@ public class StackManagerTest {
     assertTrue(configTypes.containsKey("mapred-queue-acls"));
   }

-  @Test
-  public void testCycleDetection() throws Exception {
-    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
-    OsFamily osFamily = createNiceMock(OsFamily.class);
-    replay(actionMetadata);
-    try {
-      String stacksCycle1 = "./src/test/resources/stacks_with_cycle/";
-      if (System.getProperty("os.name").contains("Windows")) {
-        stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
-      }
-      new StackManager(new File(stacksCycle1),
-              new StackContext(null, actionMetadata, osFamily));
-      fail("Expected exception due to cyclic stack");
-    } catch (AmbariException e) {
-      // expected
-      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
-    }
-
-    try {
-      String stacksCycle2 = "./src/test/resources/stacks_with_cycle2/";
-      if (System.getProperty("os.name").contains("Windows")) {
-        stacksCycle2 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle2").getPath();
-      }
-      new StackManager(new File(stacksCycle2),
-              new StackContext(null, actionMetadata, osFamily));
-      fail("Expected exception due to cyclic stack");
-    } catch (AmbariException e) {
-      // expected
-      assertEquals("Cycle detected while parsing stack definition", e.getMessage());
-    }
-  }
-
   @Test
   public void testExcludedConfigTypes() {
     StackInfo stack = stackManager.getStack("HDP", "2.0.8");
@@ -589,34 +553,6 @@ public class StackManagerTest {
     assertNotNull(hdfsService.getMetricsFile());
   }

-  /**
-   * This test ensures the service status check is added into the action metadata when
-   * the stack has no parent and is the only stack in the stack family
-   */
-  @Test
-  public void testGetServiceInfoFromSingleStack() throws Exception {
-    dao = createNiceMock(MetainfoDAO.class);
-    actionMetadata = createNiceMock(ActionMetadata.class);
-    osFamily = createNiceMock(OsFamily.class);
-
-    // ensure that service check is added for HDFS
-    actionMetadata.addServiceCheckAction("HDFS");
-    replay(dao, actionMetadata, osFamily);
-    String singleStack = "./src/test/resources/single_stack";
-    if (System.getProperty("os.name").contains("Windows")) {
-      singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
-    }
-    StackManager stackManager = new StackManager(
-        new File(singleStack.replace("/", File.separator)),
-        new StackContext(dao, actionMetadata, osFamily));
-
-    Collection<StackInfo> stacks = stackManager.getStacks();
-    assertEquals(1, stacks.size());
-    assertNotNull(stacks.iterator().next().getService("HDFS"));
-
-    verify(dao, actionMetadata, osFamily);
-  }
-
   @Test
   public void testMergeRoleCommandOrder() throws Exception {
     StackInfo stack = stackManager.getStack("HDP", "2.1.1");

+ 137 - 0
ambari-server/src/test/resources/common-services/HBASE/1.0/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

+ 121 - 0
ambari-server/src/test/resources/common-services/HBASE/1.0/metainfo.xml

@@ -0,0 +1,121 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>This is comment for HBASE service</comment>
+      <version>1.0</version>
+
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_regionserver.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>centos6</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

+ 137 - 0
ambari-server/src/test/resources/common-services/HDFS/1.0/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

+ 396 - 0
ambari-server/src/test/resources/common-services/HDFS/1.0/configuration/hdfs-site.xml

@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system.  Either the
+literal string "local" or a host:port for NDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPENGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS file is precise upto this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+</configuration>

+ 133 - 0
ambari-server/src/test/resources/common-services/HDFS/1.0/metainfo.xml

@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>This is comment for HDFS service</comment>
+      <version>1.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <name>hadoop-native</name>
+            </package>
+            <package>
+              <name>hadoop-pipes</name>
+            </package>
+            <package>
+              <name>hadoop-sbin</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

+ 127 - 0
ambari-server/src/test/resources/common-services/HIVE/1.0/metainfo.xml

@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>This is comment for HIVE service</comment>
+      <version>1.0</version>
+
+      <components>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <!-- may be 0 if specifying external db, how to specify this? -->
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>centos6</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>centos5</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
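
The metainfo.xml above declares, per component, a category (MASTER/CLIENT), a
cardinality, and optional cluster-scoped dependencies with auto-deploy hints.
A minimal sketch of walking such a file with Python's standard ElementTree;
the path is a placeholder for the test resource above, not Ambari's loader:

    import xml.etree.ElementTree as ET

    root = ET.parse("common-services/HIVE/1.0/metainfo.xml").getroot()
    for component in root.iter("component"):
        print(component.findtext("name"),
              component.findtext("category"),
              "cardinality:", component.findtext("cardinality"))
        for dep in component.iter("dependency"):
            # e.g. ZOOKEEPER/ZOOKEEPER_SERVER, cluster scope, auto-deploy
            print("  depends on", dep.findtext("name"),
                  "scope:", dep.findtext("scope"))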

+ 137 - 0
ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers.
+    The same property is used by the HMaster for the count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the HBase master web UI.
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the HBase RegionServer web UI.
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>
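
Files like hbase-site.xml above follow the generic Hadoop configuration
layout: a flat list of <property> elements with <name>, <value>, and an
optional <description>. A small sketch (standard library only, hypothetical
relative path) that loads such a file into a dict:

    import xml.etree.ElementTree as ET

    def load_site_xml(path):
        # Read a Hadoop-style *-site.xml into a name -> value dict.
        return {p.findtext("name"): p.findtext("value")
                for p in ET.parse(path).getroot().iter("property")}

    conf = load_site_xml("common-services/MAPREDUCE/1.0/configuration/hbase-site.xml")
    print(conf["hbase.client.pause"])  # "5000"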

+ 396 - 0
ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/hdfs-site.xml

@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable DFS append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>The user who is allowed to perform short-circuit
+    reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for balancing purposes, in terms of
+        bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and the base port where the DFS NameNode
+web UI will listen.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL of users who can view the default servlets in HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description>Number of IPC server reader threads</description>
+</property>
+
+</configuration>
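
Several properties above carry <final>true</final>. In Hadoop's configuration
model a final property from an earlier resource cannot be overridden by a
later one; the sketch below illustrates that merge rule (this mirrors
Hadoop's documented semantics, not Ambari's own Java merge code):

    def merge_site(base, override):
        # base/override: dicts of name -> (value, is_final)
        merged = dict(base)
        for name, (value, is_final) in override.items():
            if name in merged and merged[name][1]:
                continue  # the base value is final, keep it
            merged[name] = (value, is_final)
        return merged

    base = {"dfs.support.append": ("true", True)}
    override = {"dfs.support.append": ("false", False)}
    print(merge_site(base, override))  # the final "true" survives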

+ 400 - 0
ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml

@@ -0,0 +1,400 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable DFS append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>The user who is allowed to perform short-circuit
+    reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for balancing purposes, in terms of
+        bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>Address where the datanode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>HTTP address for the datanode</description>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and the base port where the DFS NameNode
+web UI will listen.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+<description>The max response size for IPC</description>
+</property>
+
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL of users who can view the default servlets in HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description>Number of IPC server reader threads</description>
+</property>
+
+</configuration>

+ 89 - 0
ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/metainfo.xml

@@ -0,0 +1,89 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <comment>This is a comment for the MAPREDUCE service</comment>
+      <version>1.0</version>
+      <components>
+        <component>
+          <name>JOBTRACKER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/jobtracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/jobtracker.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>TASKTRACKER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/tasktracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+
+      <excluded-config-types>
+        <config-type>hdfs-site</config-type>
+        <config-type>hbase-site</config-type>
+      </excluded-config-types>
+
+    </service>
+  </services>
+</metainfo>
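
Besides the usual <configuration-dependencies>, this metainfo.xml declares
<excluded-config-types>, which removes config types a service would otherwise
pick up (for example from an inherited parent definition). A hypothetical
sketch of applying the exclusion list:

    import xml.etree.ElementTree as ET

    root = ET.parse("common-services/MAPREDUCE/1.0/metainfo.xml").getroot()
    service = root.find("services/service")
    declared = [c.text for c in service.find("configuration-dependencies")]
    excluded = {c.text for c in service.find("excluded-config-types")}
    effective = [c for c in declared if c not in excluded]
    print(effective)  # hdfs-site/hbase-site are dropped wherever inherited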

+ 52 - 0
ambari-server/src/test/resources/common-services/PIG/1.0/configuration/pig.properties

@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf 
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
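
pig.properties uses the plain Java properties format: "#" comments, blank
lines, and key=value pairs. A small loader sketch (assumed relative path,
standard library only):

    def load_properties(path):
        props = {}
        with open(path) as f:
            for raw in f:
                line = raw.strip()
                if not line or line.startswith("#"):
                    continue  # skip comments and blank lines
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
        return props

    pig = load_properties("common-services/PIG/1.0/configuration/pig.properties")
    print(pig["pig.temp.dir"])  # "/tmp/"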

+ 61 - 0
ambari-server/src/test/resources/common-services/PIG/1.0/metainfo.xml

@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>This is a comment for the PIG service</comment>
+      <version>1.0</version>
+
+      <components>
+        <component>
+          <name>PIG</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>centos6</osFamily>
+          <packages>
+            <package>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

+ 72 - 0
ambari-server/src/test/resources/common-services/ZOOKEEPER/1.0/metainfo.xml

@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>1.0</version>
+
+      <components>
+
+        <component>
+          <name>ZOOKEEPER_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZOOKEEPER_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
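
The cardinality strings used across these metainfo files ("1", "0+", "1+",
and elsewhere ranges like "1-2") bound how many hosts may run a component.
A hypothetical helper, not Ambari's validator, that turns them into
(min, max) pairs, with None meaning no upper bound:

    def parse_cardinality(card):
        if card.endswith("+"):
            return int(card[:-1]), None
        if "-" in card:
            low, high = card.split("-", 1)
            return int(low), int(high)
        exact = int(card)
        return exact, exact

    print(parse_cardinality("1"))   # (1, 1)   e.g. ZOOKEEPER_SERVER
    print(parse_cardinality("0+"))  # (0, None) e.g. ZOOKEEPER_CLIENT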

+ 22 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/metainfo.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.0</upgrade>
+    </versions>
+</metainfo>

+ 57 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/repos/repoinfo.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="centos6, redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5, redhat5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>
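
Note that the "family" attribute can name several OS families at once
("centos6, redhat6"), all sharing one repo list. A sketch (assumed path)
that expands the attribute into a per-family map:

    import xml.etree.ElementTree as ET

    root = ET.parse("stacks_with_common_services/HDP/0.1/repos/repoinfo.xml").getroot()
    repos_by_family = {}
    for os_elem in root.findall("os"):
        repo_ids = [r.findtext("repoid") for r in os_elem.findall("repo")]
        for family in os_elem.get("family").split(","):
            repos_by_family[family.strip()] = repo_ids
    print(repos_by_family["redhat6"])  # same repo list as centos6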

+ 46 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/HDFS/metainfo.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <components>
+        <component>
+          <name>DATANODE1</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE2</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+    </service>
+  </services>
+</metainfo>
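
This file exercises the heart of the new common-services mechanism: the stack
service declares <extends>common-services/HDFS/1.0</extends> and lists only
what it adds or overrides (here DATANODE1 and DATANODE2). A conceptual sketch
of that resolution; the real merge lives on the Java side in ServiceModule
and StackModule, so this only illustrates the idea:

    import xml.etree.ElementTree as ET

    def components(path):
        root = ET.parse(path).getroot()
        return {c.findtext("name"): c for c in root.iter("component")}

    base = components("common-services/HDFS/1.0/metainfo.xml")
    child = components(
        "stacks_with_common_services/HDP/0.1/services/HDFS/metainfo.xml")

    resolved = dict(base)
    resolved.update(child)  # stack definitions win over the common service
    print(sorted(resolved))  # base HDFS components plus DATANODE1, DATANODE2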

+ 23 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/MAPREDUCE/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.1/services/PIG/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <extends>common-services/PIG/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 22 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/metainfo.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.1</upgrade>
+    </versions>
+</metainfo>

+ 57 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/repos/repoinfo.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HBASE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 145 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml

@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+  
+</configuration>

+ 223 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml

@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures during user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
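
Note: the hadoop-env.sh payload above is shipped as a single content property, with {{java_home}}-style placeholders resolved at deploy time (Ambari's agent renders these with a Jinja2-style templater on the Python side, as the {# ... #} comment in the body hints). The Java below is only a rough sketch of that substitution step; the class name, the regex, and the value map are illustrative assumptions, not Ambari API:

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TemplateDemo {
    // Matches simple {{key}} placeholders; nothing close to full Jinja2.
    private static final Pattern PLACEHOLDER = Pattern.compile("\\{\\{(\\w+)\\}\\}");

    static String render(String template, Map<String, String> values) {
        Matcher m = PLACEHOLDER.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            // Unknown keys are left as-is so missing values stay visible.
            String v = values.getOrDefault(m.group(1), m.group(0));
            m.appendReplacement(out, Matcher.quoteReplacement(v));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        String line = "export HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"";
        // Prints: export HADOOP_HEAPSIZE="1024"
        System.out.println(render(line, Map.of("hadoop_heapsize", "1024")));
    }
}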

+ 137 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers.
+    The same property is used by the HMaster for the count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

+ 199 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hdfs-log4j.xml

@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and number of rolled backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>
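
Note: like hadoop-env.xml above, this descriptor wraps an entire properties file in one content value rather than one property per setting. A minimal sketch, assuming a local copy of the file at a placeholder path, of pulling that embedded text out with a DOM parser and handing it to java.util.Properties (a shortcut that bypasses ambari-server's real stack parsers):

import java.io.StringReader;
import java.util.Properties;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class Log4jContentDemo {
    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse("hdfs-log4j.xml"); // placeholder path to a local copy
        NodeList props = doc.getElementsByTagName("property");
        for (int i = 0; i < props.getLength(); i++) {
            Element p = (Element) props.item(i);
            String name = p.getElementsByTagName("name").item(0).getTextContent();
            if ("content".equals(name)) {
                String body = p.getElementsByTagName("value").item(0).getTextContent();
                Properties log4j = new Properties();
                log4j.load(new StringReader(body)); // the value is a plain properties file
                System.out.println(log4j.getProperty("hadoop.root.logger")); // INFO,console
            }
        }
    }
}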

+ 396 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hdfs-site.xml

@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.</description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose, in terms of
+        the number of bytes per second.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and the base port where the dfs namenode
+    web UI will listen on. If the port is 0 then the server will
+    start on a free port.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much
+    space free for non-DFS use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+    The datanode ipc server address and port.
+    If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+    The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+    If "true", enable permission checking in HDFS.
+    If "false", permission checking is turned off,
+    but all other behavior is unchanged.
+    Switching from one parameter value to the other does not change the mode,
+    owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow the queue size so that more client connections are allowed.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+    If "true", access tokens are used as capabilities for accessing datanodes.
+    If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPENGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+    The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+    The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be there on dfs.data.dir
+    directories. The datanode will not come up if the permissions are
+    different on existing dfs.data.dir directories. If the directories
+    don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL that controls who can view the default servlets in HDFS</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description></description>
+  </property>
+
+</configuration>
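
Note: this file declares dfs.namenode.handler.count twice (40, then 100); whichever entry a loader reads last silently wins, which is easy to miss in a 396-line file. A small hypothetical lint pass that flags duplicate <name> entries in a Hadoop-style configuration file (the path is a placeholder; nothing like this ships with the patch):

import java.util.HashMap;
import java.util.Map;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class DuplicatePropertyCheck {
    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse("hdfs-site.xml"); // placeholder path to a local copy
        Map<String, String> seen = new HashMap<>();
        NodeList props = doc.getElementsByTagName("property");
        for (int i = 0; i < props.getLength(); i++) {
            Element p = (Element) props.item(i);
            // Assumes every <property> has a <name> and a <value>, as in the file above.
            String name = p.getElementsByTagName("name").item(0).getTextContent().trim();
            String value = p.getElementsByTagName("value").item(0).getTextContent().trim();
            String previous = seen.put(name, value);
            if (previous != null) {
                System.out.printf("duplicate %s: '%s' shadows '%s'%n", name, value, previous);
            }
        }
    }
}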

+ 30 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
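
Note: the line that carries this patch's point is <extends>common-services/HDFS/1.0</extends>: the stack's HDFS entry now only overrides its configuration dependencies and inherits everything else from the shared common-services definition. A hedged sketch of resolving such a reference to a directory on disk (the resources root and this standalone resolver are assumptions for illustration; the actual inheritance merge happens inside ambari-server's stack-parsing code):

import java.nio.file.Path;
import java.nio.file.Paths;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;

public class ExtendsResolverDemo {
    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse("metainfo.xml"); // placeholder path to a local copy
        NodeList ext = doc.getElementsByTagName("extends");
        if (ext.getLength() > 0) {
            // e.g. "common-services/HDFS/1.0" resolves under the resources root
            String ref = ext.item(0).getTextContent().trim();
            Path root = Paths.get("/var/lib/ambari-server/resources"); // assumed root
            System.out.println("parent definition at: " + root.resolve(ref));
        }
    }
}

The HIVE, MAPREDUCE, and ZOOKEEPER metainfo.xml files below follow the same pattern, each pointing at its own common-services entry.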

+ 26 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HIVE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 23 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/MAPREDUCE/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/ZOOKEEPER/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>