Browse Source

AMBARI-12885. Dynamic stack extensions - install and upgrade support for custom services (Tim Thorpe via alejandro)

Alejandro Fernandez 9 years ago
parent
commit
9ce79716d6
100 changed files with 7706 additions and 203 deletions
  1. 1 0
      ambari-server/conf/unix/ambari.properties
  2. 4 0
      ambari-server/src/main/assemblies/server.xml
  3. 58 0
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionLinkResourceDefinition.java
  4. 59 0
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionResourceDefinition.java
  5. 54 0
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionVersionResourceDefinition.java
  6. 12 0
      ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
  7. 38 4
      ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
  8. 101 0
      ambari-server/src/main/java/org/apache/ambari/server/api/services/ExtensionLinksService.java
  9. 127 0
      ambari-server/src/main/java/org/apache/ambari/server/api/services/ExtensionsService.java
  10. 23 0
      ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java
  11. 9 0
      ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
  12. 47 2
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
  13. 270 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  14. 86 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionLinkRequest.java
  15. 124 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionLinkResponse.java
  16. 43 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionRequest.java
  17. 61 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionResponse.java
  18. 44 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionVersionRequest.java
  19. 101 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionVersionResponse.java
  20. 6 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
  21. 282 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Extension.java
  22. 241 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java
  23. 121 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionResourceProvider.java
  24. 131 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionVersionResourceProvider.java
  25. 6 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java
  26. 168 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
  27. 240 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
  28. 156 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionEntity.java
  29. 139 0
      ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
  30. 2 2
      ambari-server/src/main/java/org/apache/ambari/server/stack/BaseModule.java
  31. 2 5
      ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java
  32. 2 1
      ambari-server/src/main/java/org/apache/ambari/server/stack/ComponentModule.java
  33. 2 1
      ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationModule.java
  34. 196 0
      ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionDirectory.java
  35. 167 0
      ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
  36. 540 0
      ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
  37. 4 0
      ambari-server/src/main/java/org/apache/ambari/server/stack/ModuleFileUnmarshaller.java
  38. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java
  39. 86 28
      ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
  40. 35 22
      ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java
  41. 2 1
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java
  42. 0 32
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
  43. 214 9
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
  44. 3 0
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackManagerFactory.java
  45. 184 27
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
  46. 2 4
      ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java
  47. 2 1
      ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java
  48. 160 0
      ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionId.java
  49. 208 0
      ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
  50. 11 0
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
  51. 29 0
      ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
  52. 204 0
      ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
  53. 4 0
      ambari-server/src/main/java/org/apache/ambari/server/state/stack/ServiceMetainfoXml.java
  54. 50 3
      ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
  55. 23 3
      ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
  56. 21 3
      ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
  57. 21 3
      ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
  58. 21 3
      ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
  59. 23 3
      ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
  60. 21 3
      ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
  61. 21 3
      ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
  62. 2 0
      ambari-server/src/main/resources/META-INF/persistence.xml
  63. 31 0
      ambari-server/src/main/resources/extensions/README.txt
  64. 15 1
      ambari-server/src/main/resources/key_properties.json
  65. 25 5
      ambari-server/src/main/resources/properties.json
  66. 119 0
      ambari-server/src/test/java/org/apache/ambari/server/api/services/ExtensionsServiceTest.java
  67. 91 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExtensionResourceProviderTest.java
  68. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/stack/ComponentModuleTest.java
  69. 2 2
      ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
  70. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java
  71. 28 4
      ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
  72. 131 0
      ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
  73. 36 11
      ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
  74. 111 12
      ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
  75. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java
  76. 42 1
      ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
  77. 30 0
      ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
  78. 245 0
      ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/configuration/oozie2-site.xml
  79. 110 0
      ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/metainfo.xml
  80. 20 0
      ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/package/dummy-script.py
  81. 26 0
      ambari-server/src/test/resources/extensions/EXT/0.1/services/PIG2/metainfo.xml
  82. 31 0
      ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
  83. 110 0
      ambari-server/src/test/resources/extensions/EXT/0.2/services/OOZIE2/metainfo.xml
  84. 30 0
      ambari-server/src/test/resources/stacks/OTHER/1.0/services/PIG2/metainfo.xml
  85. 22 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/metainfo.xml
  86. 57 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/repos/repoinfo.xml
  87. 46 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/HDFS/metainfo.xml
  88. 23 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/MAPREDUCE/metainfo.xml
  89. 26 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/PIG/metainfo.xml
  90. 22 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/metainfo.xml
  91. 57 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/repos/repoinfo.xml
  92. 26 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HBASE/metainfo.xml
  93. 145 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/global.xml
  94. 223 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
  95. 137 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hbase-site.xml
  96. 199 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hdfs-log4j.xml
  97. 396 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hdfs-site.xml
  98. 30 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/metainfo.xml
  99. 20 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/package/dummy-script.py
  100. 26 0
      ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HIVE/metainfo.xml

+ 1 - 0
ambari-server/conf/unix/ambari.properties

@@ -43,6 +43,7 @@ jce.download.supported=true
 
 metadata.path=$ROOT/var/lib/ambari-server/resources/stacks
 common.services.path=$ROOT/var/lib/ambari-server/resources/common-services
+extensions.path=/var/lib/ambari-server/resources/extensions
 server.version.file=$ROOT/var/lib/ambari-server/resources/version
 webapp.dir=$ROOT/usr/lib/ambari-server/web
 pid.dir=$ROOT/var/run/ambari-server

+ 4 - 0
ambari-server/src/main/assemblies/server.xml

@@ -154,6 +154,10 @@
       <directory>${stacksSrcLocation}</directory>
       <outputDirectory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</outputDirectory>
     </fileSet>
+    <fileSet>
+      <directory>src/main/resources/extensions</directory>
+      <outputDirectory>/var/lib/ambari-server/resources/extensions</outputDirectory>
+    </fileSet>
     <fileSet>
       <directory>src/main/python/ambari_server</directory>
       <outputDirectory>/usr/lib/python2.6/site-packages/ambari_server</outputDirectory>

+ 58 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionLinkResourceDefinition.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.resources;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionLinkResourceDefinition extends BaseResourceDefinition {
+
+  public ExtensionLinkResourceDefinition(Type resourceType) {
+    super(Resource.Type.ExtensionLink);
+  }
+
+  public ExtensionLinkResourceDefinition() {
+    super(Resource.Type.ExtensionLink);
+  }
+
+  @Override
+  public String getPluralName() {
+    return "links";
+  }
+
+  @Override
+  public String getSingularName() {
+    return "link";
+  }
+
+  @Override
+  public Set<SubResourceDefinition> getSubResourceDefinitions() {
+    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    return setChildren;
+  }
+
+}

+ 59 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionResourceDefinition.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.resources;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionResourceDefinition extends BaseResourceDefinition {
+
+  public ExtensionResourceDefinition(Type resourceType) {
+    super(Resource.Type.Extension);
+  }
+
+  public ExtensionResourceDefinition() {
+    super(Resource.Type.Extension);
+  }
+
+  @Override
+  public String getPluralName() {
+    return "extensions";
+  }
+
+  @Override
+  public String getSingularName() {
+    return "extension";
+  }
+
+  @Override
+  public Set<SubResourceDefinition> getSubResourceDefinitions() {
+    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
+    setChildren.add(new SubResourceDefinition(Resource.Type.ExtensionVersion));
+    return setChildren;
+  }
+
+}

+ 54 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ExtensionVersionResourceDefinition.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.resources;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.spi.Resource;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionVersionResourceDefinition extends BaseResourceDefinition {
+
+  public ExtensionVersionResourceDefinition() {
+    super(Resource.Type.ExtensionVersion);
+  }
+
+  @Override
+  public String getPluralName() {
+    return "versions";
+  }
+
+  @Override
+  public String getSingularName() {
+    return "version";
+  }
+
+  @Override
+  public Set<SubResourceDefinition> getSubResourceDefinitions() {
+
+    Set<SubResourceDefinition> children = new HashSet<SubResourceDefinition>();
+
+    return children;
+  }
+}

+ 12 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java

@@ -187,6 +187,18 @@ public class ResourceInstanceFactoryImpl implements ResourceInstanceFactory {
         resourceDefinition = new StackConfigurationDependencyResourceDefinition();
         break;
 
+      case Extension:
+        resourceDefinition = new ExtensionResourceDefinition();
+        break;
+
+      case ExtensionVersion:
+        resourceDefinition = new ExtensionVersionResourceDefinition();
+        break;
+
+      case ExtensionLink:
+        resourceDefinition = new ExtensionLinkResourceDefinition();
+        break;
+
       case OperatingSystem:
         resourceDefinition = new OperatingSystemResourceDefinition();
         break;

+ 38 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java

@@ -63,6 +63,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.OperatingSystemInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.RepositoryInfo;
@@ -108,12 +109,12 @@ public class AmbariMetaInfo {
   public static final String SERVICE_ADVISOR_FILE_NAME = "service_advisor.py";
 
   /**
-   * The filename name for a Kerberos descriptor file at either the stack or service level
+   * The filename for a Kerberos descriptor file at either the stack or service level
    */
   public static final String KERBEROS_DESCRIPTOR_FILE_NAME = "kerberos.json";
 
   /**
-   * The filename name for a Widgets descriptor file at either the stack or service level
+   * The filename for a Widgets descriptor file at either the stack or service level
    */
   public static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";
 
@@ -156,6 +157,7 @@ public class AmbariMetaInfo {
 
   private File stackRoot;
   private File commonServicesRoot;
+  private File extensionsRoot;
   private File serverVersionFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
@@ -237,6 +239,11 @@ public class AmbariMetaInfo {
       commonServicesRoot = new File(commonServicesPath);
     }
 
+    String extensionsPath = conf.getExtensionsPath();
+    if (extensionsPath != null && !extensionsPath.isEmpty()) {
+      extensionsRoot = new File(extensionsPath);
+    }
+
     String serverVersionFilePath = conf.getServerVersionFilePath();
     serverVersionFile = new File(serverVersionFilePath);
 
@@ -255,7 +262,7 @@ public class AmbariMetaInfo {
 
     readServerVersion();
 
-    stackManager = stackManagerFactory.create(stackRoot, commonServicesRoot,
+    stackManager = stackManagerFactory.create(stackRoot, commonServicesRoot, extensionsRoot,
         osFamily, false);
 
     getCustomActionDefinitions(customActionRoot);
@@ -629,6 +636,30 @@ public class AmbariMetaInfo {
     return parents;
   }
 
+  public Collection<ExtensionInfo> getExtensions() {
+    return stackManager.getExtensions();
+  }
+
+  public Collection<ExtensionInfo> getExtensions(String extensionName) throws AmbariException {
+    Collection<ExtensionInfo> extensions = stackManager.getExtensions(extensionName);
+
+    if (extensions.isEmpty()) {
+      throw new StackAccessException("extensionName=" + extensionName);
+    }
+
+    return extensions;
+  }
+
+  public ExtensionInfo getExtension(String extensionName, String version) throws AmbariException {
+    ExtensionInfo result = stackManager.getExtension(extensionName, version);
+
+    if (result == null) {
+      throw new StackAccessException("Extension " + extensionName + " " + version + " is not found in Ambari metainfo");
+    }
+
+    return result;
+  }
+
   public Set<PropertyInfo> getServiceProperties(String stackName, String version, String serviceName)
       throws AmbariException {
 
@@ -700,7 +731,6 @@ public class AmbariMetaInfo {
     return propertyResult;
   }
 
-
   /**
    * Lists operatingsystems supported by stack
    */
@@ -862,6 +892,10 @@ public class AmbariMetaInfo {
     return stackRoot;
   }
 
+  public File getExtensionsRoot() {
+    return extensionsRoot;
+  }
+
   /**
    * Return metrics for a stack service.
    */

+ 101 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/ExtensionLinksService.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+
+/**
+ * Service for extension link management.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Path("/links/")
+public class ExtensionLinksService extends BaseService {
+
+  @GET
+  @Produces("text/plain")
+  public Response getExtensionLinks(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET, createExtensionLinkResource(null));
+  }
+
+  @GET
+  @Path("{linkId}")
+  @Produces("text/plain")
+  public Response getExtensionLink(String body, @Context HttpHeaders headers,
+                                  @Context UriInfo ui, @PathParam("linkId") String linkId) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET, createExtensionLinkResource(linkId));
+  }
+
+  @POST
+  @Produces("text/plain")
+  public Response createExtensionLink(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
+    return handleRequest(headers, body, ui, Request.Type.POST, createExtensionLinkResource(null));
+  }
+
+  @DELETE
+  @Path("{linkId}")
+  @Produces("text/plain")
+  public Response deleteExtensionLink(@Context HttpHeaders headers, @Context UriInfo ui,
+                                @PathParam("linkId") String linkId) {
+
+    return handleRequest(headers, null, ui, Request.Type.DELETE, createExtensionLinkResource(linkId));
+  }
+
+  @PUT
+  @Produces("text/plain")
+  public Response updateExtensionLink(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
+    return handleRequest(headers, body, ui, Request.Type.PUT, createExtensionLinkResource(null));
+  }
+
+  @PUT
+  @Path("{linkId}")
+  @Produces("text/plain")
+  public Response updateExtensionLink(String body, @Context HttpHeaders headers, @Context UriInfo ui,
+          @PathParam("linkId") String linkId) {
+
+    return handleRequest(headers, body, ui, Request.Type.PUT, createExtensionLinkResource(linkId));
+  }
+
+  ResourceInstance createExtensionLinkResource(String linkId) {
+    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
+    mapIds.put(Resource.Type.ExtensionLink, linkId);
+    return createResource(Resource.Type.ExtensionLink, mapIds);
+  }
+
+}

+ 127 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/ExtensionsService.java

@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.controller.spi.Resource;
+
+/**
+ * Service for extensions management.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Path("/extensions/")
+public class ExtensionsService extends BaseService {
+
+  @GET
+  @Produces("text/plain")
+  public Response getExtensions(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionResource(null));
+  }
+
+  @GET
+  @Path("{extensionName}")
+  @Produces("text/plain")
+  public Response getExtension(String body, @Context HttpHeaders headers,
+                           @Context UriInfo ui,
+                           @PathParam("extensionName") String extensionName) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionResource(extensionName));
+  }
+
+  @GET
+  @Path("{extensionName}/versions")
+  @Produces("text/plain")
+  public Response getExtensionVersions(String body,
+                                   @Context HttpHeaders headers,
+                                   @Context UriInfo ui, @PathParam("extensionName") String extensionName) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionVersionResource(extensionName, null));
+  }
+
+  @GET
+  @Path("{extensionName}/versions/{extensionVersion}")
+  @Produces("text/plain")
+  public Response getExtensionVersion(String body,
+                                  @Context HttpHeaders headers,
+                                  @Context UriInfo ui, @PathParam("extensionName") String extensionName,
+                                  @PathParam("extensionVersion") String extensionVersion) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionVersionResource(extensionName, extensionVersion));
+  }
+
+  @GET
+  @Path("{extensionName}/versions/{extensionVersion}/links")
+  @Produces("text/plain")
+  public Response getExtensionVersionLinks(String body,
+                                  @Context HttpHeaders headers,
+                                  @Context UriInfo ui, @PathParam("extensionName") String extensionName,
+                                  @PathParam("extensionVersion") String extensionVersion) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionLinkResource(null, null, extensionName, extensionVersion));
+  }
+
+  ResourceInstance createExtensionVersionResource(String extensionName,
+                                              String extensionVersion) {
+    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
+    mapIds.put(Resource.Type.Extension, extensionName);
+    mapIds.put(Resource.Type.ExtensionVersion, extensionVersion);
+
+    return createResource(Resource.Type.ExtensionVersion, mapIds);
+  }
+
+  ResourceInstance createExtensionLinkResource(String stackName, String stackVersion,
+                                  String extensionName, String extensionVersion) {
+    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
+    mapIds.put(Resource.Type.Stack, stackName);
+    mapIds.put(Resource.Type.StackVersion, stackVersion);
+    mapIds.put(Resource.Type.Extension, extensionName);
+    mapIds.put(Resource.Type.ExtensionVersion, extensionVersion);
+
+    return createResource(Resource.Type.ExtensionLink, mapIds);
+  }
+
+  ResourceInstance createExtensionResource(String extensionName) {
+
+    return createResource(Resource.Type.Extension,
+        Collections.singletonMap(Resource.Type.Extension, extensionName));
+
+  }
+}

+ 23 - 0
ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java

@@ -82,6 +82,18 @@ public class StacksService extends BaseService {
         createStackVersionResource(stackName, stackVersion));
   }
 
+  @GET
+  @Path("{stackName}/versions/{stackVersion}/links")
+  @Produces("text/plain")
+  public Response getStackVersionLinks(String body,
+                                  @Context HttpHeaders headers,
+                                  @Context UriInfo ui, @PathParam("stackName") String stackName,
+                                  @PathParam("stackVersion") String stackVersion) {
+
+    return handleRequest(headers, body, ui, Request.Type.GET,
+        createExtensionLinkResource(stackName, stackVersion, null, null));
+  }
+
   @GET
   @Path("{stackName}/versions/{stackVersion}/configurations")
   @Produces("text/plain")
@@ -491,6 +503,17 @@ public class StacksService extends BaseService {
     return createResource(Resource.Type.QuickLink, mapIds);
   }
 
  /**
   * Builds a resource instance identifying an extension link (stack version to
   * extension version association).
   * extensionName/extensionVersion may be null to match all links for the
   * stack version.
   */
  ResourceInstance createExtensionLinkResource(String stackName, String stackVersion,
                                  String extensionName, String extensionVersion) {
    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
    mapIds.put(Resource.Type.Stack, stackName);
    mapIds.put(Resource.Type.StackVersion, stackVersion);
    mapIds.put(Resource.Type.Extension, extensionName);
    mapIds.put(Resource.Type.ExtensionVersion, extensionVersion);

    return createResource(Resource.Type.ExtensionLink, mapIds);
  }
+
   ResourceInstance createStackResource(String stackName) {
 
     return createResource(Resource.Type.Stack,

+ 9 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -137,6 +137,7 @@ public class Configuration {
   public static final String RESOURCES_DIR_KEY = "resources.dir";
   public static final String METADATA_DIR_PATH = "metadata.path";
   public static final String COMMON_SERVICES_DIR_PATH = "common.services.path";
+  public static final String EXTENSIONS_DIR_PATH = "extensions.path";
   public static final String MPACKS_STAGING_DIR_PATH = "mpacks.staging.path";
   public static final String SERVER_VERSION_FILE = "server.version.file";
   public static final String SERVER_VERSION_KEY = "version";
@@ -1478,6 +1479,14 @@ public class Configuration {
     return properties.getProperty(COMMON_SERVICES_DIR_PATH);
   }
 
  /**
   * Gets the directory that holds dynamic stack extensions, as configured by
   * the {@code extensions.path} property.
   *
   * @return the configured extensions directory path, or {@code null} when the
   *         property is not set
   */
  public String getExtensionsPath() {
    return properties.getProperty(EXTENSIONS_DIR_PATH);
  }
+
   /**
    * Gets ambari management packs staging directory
    * @return String

+ 47 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java

@@ -34,6 +34,7 @@ import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetric
 import org.apache.ambari.server.events.AmbariEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.scheduler.ExecutionScheduleManager;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -355,6 +356,52 @@ public interface AmbariManagementController {
    */
   RequestStatusResponse updateStacks() throws AmbariException;
 
+  /**
+   * Create a link between an extension and a stack
+   *
+   * @throws AmbariException if we fail to link the extension to the stack
+   */
+  public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+
+  /**
+   * Update a link between an extension and a stack
+   *
+   * @throws AmbariException if we fail to link the extension to the stack
+   */
+  public void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+
+  /**
+   * Update a link between an extension and a stack.  Unlike the request-based
+   * overload, this variant takes the existing link entity directly and forces
+   * the server to reread the stack and extension directories.
+   *
+   * @throws AmbariException if we fail to link the extension to the stack
+   */
+  public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException;
+
+  /**
+   * Delete a link between an extension and a stack
+   *
+   * @throws AmbariException if we fail to unlink the extension from the stack
+   */
+  public void deleteExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+
+  /**
+   * Get supported extensions.
+   *
+   * @param requests the extensions
+   * @return a set of extensions responses
+   * @throws  AmbariException if the resources cannot be read
+   */
+  public Set<ExtensionResponse> getExtensions(Set<ExtensionRequest> requests) throws AmbariException;
+
+  /**
+   * Get supported extension versions.
+   *
+   * @param requests the extension versions
+   * @return a set of extension versions responses
+   * @throws  AmbariException if the resources cannot be read
+   */
+  public Set<ExtensionVersionResponse> getExtensionVersions(Set<ExtensionVersionRequest> requests) throws AmbariException;
+
   /**
    * Get supported stacks versions.
    *
@@ -366,7 +413,6 @@ public interface AmbariManagementController {
    */
   Set<StackVersionResponse> getStackVersions(Set<StackVersionRequest> requests) throws AmbariException;
 
-
   /**
    * Get repositories by stack name, version and operating system.
    *
@@ -431,7 +477,6 @@ public interface AmbariManagementController {
    */
   Set<StackServiceComponentResponse> getStackComponents(Set<StackServiceComponentRequest> requests) throws AmbariException;
 
-
   /**
    * Get operating systems by stack name, version.
    *

+ 270 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -65,6 +65,8 @@ import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 
+import javax.persistence.RollbackException;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
@@ -102,14 +104,20 @@ import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.WidgetDAO;
 import org.apache.ambari.server.orm.dao.WidgetLayoutDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
 import org.apache.ambari.server.orm.entities.RepositoryEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.WidgetEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutUserWidgetEntity;
@@ -129,6 +137,8 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.security.ldap.LdapSyncDto;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
 import org.apache.ambari.server.serveraction.kerberos.KerberosOperationException;
+import org.apache.ambari.server.stack.ExtensionHelper;
+import org.apache.ambari.server.stack.StackManager;
 import org.apache.ambari.server.stageplanner.RoleGraph;
 import org.apache.ambari.server.stageplanner.RoleGraphFactory;
 import org.apache.ambari.server.state.Cluster;
@@ -139,6 +149,7 @@ import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
@@ -166,6 +177,7 @@ import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.stack.RepositoryXml;
+import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
@@ -271,6 +283,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   private MaintenanceStateHelper maintenanceStateHelper;
 
+  @Inject
+  private ExtensionLinkDAO linkDAO;
+  @Inject
+  private ExtensionDAO extensionDAO;
+  @Inject
+  private StackDAO stackDAO;
+
   /**
    * The KerberosHelper to help setup for enabling for disabling Kerberos
    */
@@ -3782,6 +3801,94 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return null;
   }
 
  /**
   * Resolves each {@link ExtensionRequest} to its matching extensions and
   * unions the results. A {@link StackAccessException} for an unknown
   * extension is propagated only when there is exactly one request; multiple
   * requests come from an OR predicate, where partial matches are expected.
   */
  @Override
  public Set<ExtensionResponse> getExtensions(Set<ExtensionRequest> requests)
      throws AmbariException {
    Set<ExtensionResponse> response = new HashSet<ExtensionResponse>();
    for (ExtensionRequest request : requests) {
      try {
        response.addAll(getExtensions(request));
      } catch (StackAccessException e) {
        if (requests.size() == 1) {
          // only throw exception if 1 request.
          // there will be > 1 request in case of OR predicate
          throw e;
        }
      }
    }
    return response;

  }
+
+
  /**
   * Returns a response for the extension named in the request, or responses
   * for all registered extensions when no name is given.
   *
   * @throws AmbariException (StackAccessException) if a named extension does
   *         not exist — thrown by ambariMetaInfo.getExtensions(name)
   */
  private Set<ExtensionResponse> getExtensions(ExtensionRequest request)
      throws AmbariException {
    Set<ExtensionResponse> response;

    String extensionName = request.getExtensionName();

    if (extensionName != null) {
      // this will throw an exception if the extension doesn't exist
      ambariMetaInfo.getExtensions(extensionName);
      response = Collections.singleton(new ExtensionResponse(extensionName));
    } else {
      Collection<ExtensionInfo> supportedExtensions = ambariMetaInfo.getExtensions();
      response = new HashSet<ExtensionResponse>();
      for (ExtensionInfo extension: supportedExtensions) {
        response.add(new ExtensionResponse(extension.getName()));
      }
    }
    return response;
  }
+
+  @Override
+  public Set<ExtensionVersionResponse> getExtensionVersions(
+      Set<ExtensionVersionRequest> requests) throws AmbariException {
+    Set<ExtensionVersionResponse> response = new HashSet<ExtensionVersionResponse>();
+    for (ExtensionVersionRequest request : requests) {
+      String extensionName = request.getExtensionName();
+      try {
+        Set<ExtensionVersionResponse> stackVersions = getExtensionVersions(request);
+        for (ExtensionVersionResponse stackVersionResponse : stackVersions) {
+          stackVersionResponse.setExtensionName(extensionName);
+        }
+        response.addAll(stackVersions);
+      } catch (StackAccessException e) {
+        if (requests.size() == 1) {
+          // only throw exception if 1 request.
+          // there will be > 1 request in case of OR predicate
+          throw e;
+        }
+      }
+    }
+
+    return response;
+  }
+
+  private Set<ExtensionVersionResponse> getExtensionVersions(ExtensionVersionRequest request) throws AmbariException {
+    Set<ExtensionVersionResponse> response;
+
+    String extensionName = request.getExtensionName();
+    String extensionVersion = request.getExtensionVersion();
+
+    if (extensionVersion != null) {
+      ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(extensionName, extensionVersion);
+      response = Collections.singleton(extensionInfo.convertToResponse());
+    } else {
+      try {
+        Collection<ExtensionInfo> extensionInfos = ambariMetaInfo.getExtensions(extensionName);
+        response = new HashSet<ExtensionVersionResponse>();
+        for (ExtensionInfo extensionInfo: extensionInfos) {
+          response.add(extensionInfo.convertToResponse());
+        }
+      } catch (StackAccessException e) {
+        response = Collections.emptySet();
+      }
+    }
+
+    return response;
+  }
+
   @Override
   public Set<RepositoryResponse> getRepositories(Set<RepositoryRequest> requests)
       throws AmbariException {
@@ -4848,4 +4955,167 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
   }
+
+  /**
+   * This method will delete a link between an extension version and a stack version (Extension Link).
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  @Override
+  public void deleteExtensionLink(ExtensionLinkRequest request) throws AmbariException {
+    if (request.getLinkId() == null) {
+      throw new IllegalArgumentException("Link ID should be provided");
+    }
+    ExtensionLinkEntity linkEntity = null;
+    try {
+      linkEntity = linkDAO.findById(new Long(request.getLinkId()));
+    } catch (RollbackException e) {
+      throw new AmbariException("Unable to find extension link"
+            + ", linkId=" + request.getLinkId(), e);
+    }
+
+    StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
+
+    if (stackInfo == null)
+      throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+
+    ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
+
+    if (extensionInfo == null)
+      throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+
+    ExtensionHelper.validateDeleteLink(getClusters(), stackInfo, extensionInfo);
+    ambariMetaInfo.getStackManager().unlinkStackAndExtension(stackInfo, extensionInfo);
+
+    try {
+      linkDAO.remove(linkEntity);
+    } catch (RollbackException e) {
+      throw new AmbariException("Unable to delete extension link"
+              + ", linkId=" + request.getLinkId()
+              + ", stackName=" + request.getStackName()
+              + ", stackVersion=" + request.getStackVersion()
+              + ", extensionName=" + request.getExtensionName()
+              + ", extensionVersion=" + request.getExtensionVersion(), e);
+    }
+  }
+
+  /**
+   * This method will create a link between an extension version and a stack version (Extension Link).
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  @Override
+  public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException {
+    validateCreateExtensionLinkRequest(request);
+
+    StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
+
+    if (stackInfo == null)
+      throw new StackAccessException("stackName=" + request.getStackName() + ", stackVersion=" + request.getStackVersion());
+
+    ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(request.getExtensionName(), request.getExtensionVersion());
+
+    if (extensionInfo == null)
+      throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
+
+    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
+    ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
+
+    try {
+      linkDAO.create(linkEntity);
+      linkEntity = linkDAO.merge(linkEntity);
+    } catch (RollbackException e) {
+      String message = "Unable to create extension link";
+      LOG.debug(message, e);
+      String errorMessage = message
+              + ", stackName=" + request.getStackName()
+              + ", stackVersion=" + request.getStackVersion()
+              + ", extensionName=" + request.getExtensionName()
+              + ", extensionVersion=" + request.getExtensionVersion();
+      LOG.warn(errorMessage);
+      throw new AmbariException(errorMessage, e);
+    }
+  }
+
+  /**
+   * This method will update a link between an extension version and a stack version (Extension Link).
+   * Updating will only force ambari server to reread the stack and extension directories.
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  @Override
+  public void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException {
+    if (request.getLinkId() == null) {
+      throw new AmbariException("Link ID should be provided");
+    }
+    ExtensionLinkEntity linkEntity = null;
+    try {
+      linkEntity = linkDAO.findById(new Long(request.getLinkId()));
+    } catch (RollbackException e) {
+      throw new AmbariException("Unable to find extension link"
+            + ", linkId=" + request.getLinkId(), e);
+    }
+    updateExtensionLink(linkEntity);
+  }
+
+  /**
+   * This method will update a link between an extension version and a stack version (Extension Link).
+   * Updating will only force ambari server to reread the stack and extension directories.
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  @Override
+  public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException {
+    StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
+
+    if (stackInfo == null)
+      throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+
+    ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
+
+    if (extensionInfo == null)
+      throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+
+    ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
+  }
+
  /**
   * Ensures a create-link request names both sides of the link and that the
   * stack/extension pair is not already linked in the database.
   *
   * @throws IllegalArgumentException if any of the four coordinates is missing
   * @throws AmbariException if a link already exists for the pair
   */
  private void validateCreateExtensionLinkRequest(ExtensionLinkRequest request) throws AmbariException {
    if (request.getStackName() == null
            || request.getStackVersion() == null
            || request.getExtensionName() == null
            || request.getExtensionVersion() == null) {

      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
    }

    ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(request.getStackName(), request.getStackVersion(),
            request.getExtensionName(), request.getExtensionVersion());

    if (entity != null) {
      throw new AmbariException("The stack and extension are already linked"
                + ", stackName=" + request.getStackName()
                + ", stackVersion=" + request.getStackVersion()
                + ", extensionName=" + request.getExtensionName()
                + ", extensionVersion=" + request.getExtensionVersion());
    }
  }
+
+  private ExtensionLinkEntity createExtensionLinkEntity(ExtensionLinkRequest request) throws AmbariException {
+    StackEntity stack = stackDAO.find(request.getStackName(), request.getStackVersion());
+    ExtensionEntity extension = extensionDAO.find(request.getExtensionName(), request.getExtensionVersion());
+
+    ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
+    linkEntity.setStack(stack);
+    linkEntity.setExtension(extension);
+    return linkEntity;
+  }
 }

+ 86 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionLinkRequest.java

@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
/**
 * Request to create, update or delete a link between a stack version and an
 * extension version.
 *
 * An extension version is like a stack version but it contains custom services.  Linking an extension
 * version to the current stack version allows the cluster to install the custom services contained in
 * the extension version.
 */
public class ExtensionLinkRequest {

  // Database id of an existing link; null when creating a new link.
  private String linkId;

  private String stackName;

  private String stackVersion;

  private String extensionName;

  private String extensionVersion;

  public ExtensionLinkRequest(String linkId, String stackName, String stackVersion,
      String extensionName, String extensionVersion) {
    // assign fields directly instead of calling overridable setters from the
    // constructor (unsafe if the class is ever subclassed)
    this.linkId = linkId;
    this.stackName = stackName;
    this.stackVersion = stackVersion;
    this.extensionName = extensionName;
    this.extensionVersion = extensionVersion;
  }

  public String getLinkId() {
    return linkId;
  }

  public void setLinkId(String linkId) {
    this.linkId = linkId;
  }

  public String getStackName() {
    return stackName;
  }

  public void setStackName(String stackName) {
    this.stackName = stackName;
  }

  public String getStackVersion() {
    return stackVersion;
  }

  public void setStackVersion(String stackVersion) {
    this.stackVersion = stackVersion;
  }

  public String getExtensionName() {
    return extensionName;
  }

  public void setExtensionName(String extensionName) {
    this.extensionName = extensionName;
  }

  public String getExtensionVersion() {
    return extensionVersion;
  }

  public void setExtensionVersion(String extensionVersion) {
    this.extensionVersion = extensionVersion;
  }

}

+ 124 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionLinkResponse.java

@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.stack.Validable;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionLinkResponse implements Validable {
+
+  private String linkId;
+
+  private String stackName;
+
+  private String stackVersion;
+
+  private String extensionName;
+
+  private String extensionVersion;
+
+  private boolean valid;
+
+  private Set<String> errorSet = new HashSet<String>();
+
+  public ExtensionLinkResponse(String linkId, String stackName, String stackVersion, String extensionName,
+                              String extensionVersion, boolean valid, Collection errorSet) {
+
+    setLinkId(linkId);
+    setStackName(stackName);
+    setStackVersion(stackVersion);
+    setExtensionName(extensionName);
+    setExtensionVersion(extensionVersion);
+    setValid(valid);
+    addErrors(errorSet);
+  }
+
+  public String getLinkId() {
+    return linkId;
+  }
+
+  public void setLinkId(String linkId) {
+    this.linkId = linkId;
+  }
+
+  public String getStackName() {
+    return stackName;
+  }
+
+  public void setStackName(String stackName) {
+    this.stackName = stackName;
+  }
+
+  public String getStackVersion() {
+    return stackVersion;
+  }
+
+  public void setStackVersion(String stackVersion) {
+    this.stackVersion = stackVersion;
+  }
+
+  public String getExtensionName() {
+    return extensionName;
+  }
+
+  public void setExtensionName(String extensionName) {
+    this.extensionName = extensionName;
+  }
+
+  public String getExtensionVersion() {
+    return extensionVersion;
+  }
+
+  public void setExtensionVersion(String extensionVersion) {
+    this.extensionVersion = extensionVersion;
+  }
+
+  @Override
+  public boolean isValid() {
+    return valid;
+  }
+
+  @Override
+  public void setValid(boolean valid) {
+    this.valid = valid;
+  }
+
+  @Override
+  public void addError(String error) {
+    errorSet.add(error);
+  }
+
+  @Override
+  public Collection<String> getErrors() {
+    return errorSet;
+  }
+
+  @Override
+  public void addErrors(Collection<String> errors) {
+    this.errorSet.addAll(errors);
+  }
+}

+ 43 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionRequest.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
/**
 * Request for extensions.  When the extension name is null, all registered
 * extensions are matched.
 *
 * An extension version is like a stack version but it contains custom services.  Linking an extension
 * version to the current stack version allows the cluster to install the custom services contained in
 * the extension version.
 */
public class ExtensionRequest {

  // declared before the methods that use it, per convention
  private String extensionName;

  public ExtensionRequest(String extensionName) {
    // direct assignment avoids invoking an overridable setter from the constructor
    this.extensionName = extensionName;
  }

  public String getExtensionName() {
    return extensionName;
  }

  public void setExtensionName(String extensionName) {
    this.extensionName = extensionName;
  }

}

+ 61 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionResponse.java

@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionResponse {
+
+  private String extensionName;
+
+  public ExtensionResponse(String extensionName) {
+    setExtensionName(extensionName);
+  }
+
+  public String getExtensionName() {
+    return extensionName;
+  }
+
+  public void setExtensionName(String extensionName) {
+    this.extensionName = extensionName;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = 1;
+    result = 31 + getExtensionName().hashCode();
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof ExtensionResponse)) {
+      return false;
+    }
+    if (this == obj) {
+      return true;
+    }
+    ExtensionResponse extensionResponse = (ExtensionResponse) obj;
+    return getExtensionName().equals(extensionResponse.getExtensionName());
+  }
+
+}

+ 44 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionVersionRequest.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.ambari.server.controller;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionVersionRequest extends ExtensionRequest {
+
+  public ExtensionVersionRequest(String extensionName, String extensionVersion) {
+    super(extensionName);
+    setExtensionVersion(extensionVersion);
+  }
+
+  public String getExtensionVersion() {
+    return extensionVersion;
+  }
+
+  public void setExtensionVersion(String extensionVersion) {
+    this.extensionVersion = extensionVersion;
+  }
+
+  private String extensionVersion;
+
+}

+ 101 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/ExtensionVersionResponse.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.stack.Validable;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionVersionResponse implements Validable{
+
+  private String extensionName;
+  private String extensionVersion;
+  private boolean valid;
+  private String parentVersion;
+
+  public ExtensionVersionResponse(String extensionVersion, String parentVersion,
+                              boolean valid, Collection errorSet) {
+    setExtensionVersion(extensionVersion);
+    setParentVersion(parentVersion);
+    setValid(valid);
+    addErrors(errorSet);
+  }
+
+  @Override
+  public boolean isValid() {
+    return valid;
+  }
+
+  @Override
+  public void setValid(boolean valid) {
+    this.valid = valid;
+  }
+
+  private Set<String> errorSet = new HashSet<String>();
+
+  @Override
+  public void addError(String error) {
+    errorSet.add(error);
+  }
+
+  @Override
+  public Collection<String> getErrors() {
+    return errorSet;
+  }
+
+  @Override
+  public void addErrors(Collection<String> errors) {
+    this.errorSet.addAll(errors);
+  }
+
+
+  public String getExtensionName() {
+    return extensionName;
+  }
+
+  public void setExtensionName(String extensionName) {
+    this.extensionName = extensionName;
+  }
+
+  public String getExtensionVersion() {
+    return extensionVersion;
+  }
+
+  public void setExtensionVersion(String extensionVersion) {
+    this.extensionVersion = extensionVersion;
+  }
+
+  public String getParentVersion() {
+    return parentVersion;
+  }
+
+  public void setParentVersion(String parentVersion) {
+    this.parentVersion = parentVersion;
+  }
+}

+ 6 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java

@@ -173,6 +173,12 @@ public abstract class AbstractControllerResourceProvider extends AbstractAuthori
         return new StackConfigurationDependencyResourceProvider(propertyIds, keyPropertyIds, managementController);
       case StackLevelConfiguration:
         return new StackLevelConfigurationResourceProvider(propertyIds, keyPropertyIds, managementController);
+      case ExtensionLink:
+          return new ExtensionLinkResourceProvider(propertyIds, keyPropertyIds, managementController);
+      case Extension:
+        return new ExtensionResourceProvider(propertyIds, keyPropertyIds, managementController);
+      case ExtensionVersion:
+        return new ExtensionVersionResourceProvider(propertyIds, keyPropertyIds, managementController);
       case RootService:
         return new RootServiceResourceProvider(propertyIds, keyPropertyIds, managementController);
       case RootServiceComponent:

+ 282 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Extension.java

@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.state.AutoDeployInfo;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.topology.Cardinality;
+import org.apache.ambari.server.topology.Configuration;
+
+/**
+ * Encapsulates extension information.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class Extension {
+  /**
+   * Extension name
+   */
+  private String name;
+
+  /**
+   * Extension version
+   */
+  private String version;
+
+  /**
+   * Map of service name to components
+   */
+  private Map<String, Collection<String>> serviceComponents =
+      new HashMap<String, Collection<String>>();
+
+  /**
+   * Map of component to service
+   */
+  private Map<String, String> componentService = new HashMap<String, String>();
+
+  /**
+   * Map of component to dependencies
+   */
+  private Map<String, Collection<DependencyInfo>> dependencies =
+      new HashMap<String, Collection<DependencyInfo>>();
+
+  /**
+   * Map of dependency to conditional service
+   */
+  private Map<DependencyInfo, String> dependencyConditionalServiceMap =
+      new HashMap<DependencyInfo, String>();
+
+  /**
+   * Map of database component name to configuration property which indicates whether
+   * the database in to be managed or if it is an external non-managed instance.
+   * If the value of the config property starts with 'New', the database is determined
+   * to be managed, otherwise it is non-managed.
+   */
+  private Map<String, String> dbDependencyInfo = new HashMap<String, String>();
+
+  /**
+   * Map of component to required cardinality
+   */
+  private Map<String, String> cardinalityRequirements = new HashMap<String, String>();
+
+  /**
+   * Map of component to auto-deploy information
+   */
+  private Map<String, AutoDeployInfo> componentAutoDeployInfo =
+      new HashMap<String, AutoDeployInfo>();
+
+  /**
+   * Ambari Management Controller, used to obtain Extension definitions
+   */
+  private final AmbariManagementController controller;
+
+
+  /**
+   * Constructor.
+   *
+   * @param extension
+   *          the extension (not {@code null}).
+   * @param ambariManagementController
+   *          the management controller (not {@code null}).
+   * @throws AmbariException
+   */
+  public Extension(ExtensionEntity extension, AmbariManagementController ambariManagementController) throws AmbariException {
+    this(extension.getExtensionName(), extension.getExtensionVersion(), ambariManagementController);
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param name     extension name
+   * @param version  extension version
+   *
+   * @throws AmbariException an exception occurred getting extension information
+   *                         for the specified name and version
+   */
+  //todo: don't pass management controller in constructor
+  public Extension(String name, String version, AmbariManagementController controller) throws AmbariException {
+    this.name = name;
+    this.version = version;
+    this.controller = controller;
+  }
+
+  /**
+   * Obtain extension name.
+   *
+   * @return extension name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Obtain extension version.
+   *
+   * @return extension version
+   */
+  public String getVersion() {
+    return version;
+  }
+
+
+  Map<DependencyInfo, String> getDependencyConditionalServiceMap() {
+    return dependencyConditionalServiceMap;
+  }
+
+  /**
+   * Get services contained in the extension.
+   *
+   * @return collection of all services for the extension
+   */
+  public Collection<String> getServices() {
+    return serviceComponents.keySet();
+  }
+
+  /**
+   * Get components contained in the extension for the specified service.
+   *
+   * @param service  service name
+   *
+   * @return collection of component names for the specified service
+   */
+  public Collection<String> getComponents(String service) {
+    return serviceComponents.get(service);
+  }
+
+  /**
+   * Get all service components
+   *
+   * @return map of service to associated components
+   */
+  public Map<String, Collection<String>> getComponents() {
+    Map<String, Collection<String>> serviceComponents = new HashMap<String, Collection<String>>();
+    for (String service : getServices()) {
+      Collection<String> components = new HashSet<String>();
+      components.addAll(getComponents(service));
+      serviceComponents.put(service, components);
+    }
+    return serviceComponents;
+  }
+
+  /**
+   * Get info for the specified component.
+   *
+   * @param component  component name
+   *
+   * @return component information for the requested component
+   *         or null if the component doesn't exist in the extension
+   */
+  public ComponentInfo getComponentInfo(String component) {
+    ComponentInfo componentInfo = null;
+    String service = getServiceForComponent(component);
+    if (service != null) {
+      try {
+        componentInfo = controller.getAmbariMetaInfo().getComponent(
+            getName(), getVersion(), service, component);
+      } catch (AmbariException e) {
+        // just return null if component doesn't exist
+      }
+    }
+    return componentInfo;
+  }
+
+  /**
+   * Get the service for the specified component.
+   *
+   * @param component  component name
+   *
+   * @return service name that contains tha specified component
+   */
+  public String getServiceForComponent(String component) {
+    return componentService.get(component);
+  }
+
+  /**
+   * Get the names of the services which contains the specified components.
+   *
+   * @param components collection of components
+   *
+   * @return collection of services which contain the specified components
+   */
+  public Collection<String> getServicesForComponents(Collection<String> components) {
+    Set<String> services = new HashSet<String>();
+    for (String component : components) {
+      services.add(getServiceForComponent(component));
+    }
+
+    return services;
+  }
+
+  /**
+   * Return the dependencies specified for the given component.
+   *
+   * @param component  component to get dependency information for
+   *
+   * @return collection of dependency information for the specified component
+   */
+  //todo: full dependency graph
+  public Collection<DependencyInfo> getDependenciesForComponent(String component) {
+    return dependencies.containsKey(component) ? dependencies.get(component) :
+        Collections.<DependencyInfo>emptySet();
+  }
+
+  /**
+   * Get the service, if any, that a component dependency is conditional on.
+   *
+   * @param dependency  dependency to get conditional service for
+   *
+   * @return conditional service for provided component or null if dependency
+   *         is not conditional on a service
+   */
+  public String getConditionalServiceForDependency(DependencyInfo dependency) {
+    return dependencyConditionalServiceMap.get(dependency);
+  }
+
+  public String getExternalComponentConfig(String component) {
+    return dbDependencyInfo.get(component);
+  }
+
+  /**
+   * Obtain the required cardinality for the specified component.
+   */
+  public Cardinality getCardinality(String component) {
+    return new Cardinality(cardinalityRequirements.get(component));
+  }
+
+  /**
+   * Obtain auto-deploy information for the specified component.
+   */
+  public AutoDeployInfo getAutoDeployInfo(String component) {
+    return componentAutoDeployInfo.get(component);
+  }
+}

+ 241 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java

@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.StaticallyInject;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExtensionLinkRequest;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.spi.*;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+
+import com.google.inject.Inject;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
@StaticallyInject
public class ExtensionLinkResourceProvider extends AbstractControllerResourceProvider {

  public static final String LINK_ID_PROPERTY_ID = PropertyHelper
      .getPropertyId("ExtensionLink", "link_id");

  public static final String STACK_NAME_PROPERTY_ID = PropertyHelper
	      .getPropertyId("ExtensionLink", "stack_name");

  public static final String STACK_VERSION_PROPERTY_ID = PropertyHelper
      .getPropertyId("ExtensionLink", "stack_version");

  public static final String EXTENSION_NAME_PROPERTY_ID = PropertyHelper
      .getPropertyId("ExtensionLink", "extension_name");

  public static final String EXTENSION_VERSION_PROPERTY_ID = PropertyHelper
      .getPropertyId("ExtensionLink", "extension_version");

  // All five properties together form the primary key of an extension link.
  private static Set<String> pkPropertyIds = new HashSet<String>(
      Arrays.asList(new String[] { LINK_ID_PROPERTY_ID, STACK_NAME_PROPERTY_ID, STACK_VERSION_PROPERTY_ID, EXTENSION_NAME_PROPERTY_ID, EXTENSION_VERSION_PROPERTY_ID }));

  // Statically injected (see @StaticallyInject on the class).
  @Inject
  private static ExtensionLinkDAO dao;

  /**
   * Constructor.
   *
   * @param propertyIds           supported property ids
   * @param keyPropertyIds        key property ids per resource type
   * @param managementController  the management controller
   */
  protected ExtensionLinkResourceProvider(Set<String> propertyIds,
      Map<Type, String> keyPropertyIds,
      AmbariManagementController managementController) {
    super(propertyIds, keyPropertyIds, managementController);
  }

  /**
   * Creates one extension link per property map in the request, then reloads
   * the stack/extension metadata so the newly linked services become visible.
   */
  @Override
  public RequestStatus createResources(Request request)
	        throws SystemException, UnsupportedPropertyException,
	        NoSuchParentResourceException, ResourceAlreadyExistsException {

    final Set<ExtensionLinkRequest> requests = new HashSet<ExtensionLinkRequest>();
    for (Map<String, Object> propertyMap : request.getProperties()) {
      requests.add(getRequest(propertyMap));
    }

    createResources(new Command<Void>() {
      @Override
      public Void invoke() throws AmbariException {
        for (ExtensionLinkRequest extensionLinkRequest : requests) {
          getManagementController().createExtensionLink(extensionLinkRequest);
        }
        return null;
      }
    });

    if (requests.size() > 0) {
      //Need to reread the stacks/extensions directories so the latest information is available
      try {
        getManagementController().updateStacks();
      } catch (AmbariException e) {
        throw new SystemException(e.getMessage(), e);
      }

      notifyCreate(Resource.Type.ExtensionLink, request);
    }

    return getRequestStatus(null);
  }

  /**
   * Deletes the extension links matching the predicate (all links when the
   * predicate is null), then reloads the stack/extension metadata.
   */
  // NOTE(review): this looks like it overrides a superclass hook but is not
  // annotated @Override -- confirm against AbstractAuthorizedResourceProvider.
  protected RequestStatus deleteResourcesAuthorized(Request request, Predicate predicate)
        throws SystemException, UnsupportedPropertyException,
        NoSuchResourceException, NoSuchParentResourceException {

    final Set<ExtensionLinkRequest> requests = new HashSet<ExtensionLinkRequest>();
    if (predicate == null) {
      requests.add(getRequest(Collections.<String, Object>emptyMap()));
    } else {
      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
        requests.add(getRequest(propertyMap));
      }
    }

    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
      @Override
      public RequestStatusResponse invoke() throws AmbariException {
        for (ExtensionLinkRequest extensionLinkRequest : requests) {
          getManagementController().deleteExtensionLink(extensionLinkRequest);
        }
        return null;
      }
    });

    //Need to reread the stacks/extensions directories so the latest information is available
    try {
      getManagementController().updateStacks();
    } catch (AmbariException e) {
      throw new SystemException(e.getMessage(), e);
    }

    notifyDelete(Resource.Type.ExtensionLink, predicate);

    return getRequestStatus(response);
  }

  /**
   * Looks up extension link entities via the DAO for each request derived from
   * the predicate and converts them to resources.  Each request is first
   * checked so that a non-existent stack or extension surfaces as
   * NoSuchParentResourceException rather than an empty result.
   */
  @Override
  public Set<Resource> getResources(Request request, Predicate predicate)
        throws SystemException, UnsupportedPropertyException,
        NoSuchResourceException, NoSuchParentResourceException {

    final Set<Resource> resources = new HashSet<Resource>();
    final Set<String> requestedIds = getRequestPropertyIds(request, predicate);

    final Set<ExtensionLinkRequest> requests = new HashSet<ExtensionLinkRequest>();
    if (predicate == null) {
      requests.add(getRequest(Collections.<String, Object>emptyMap()));
    } else {
      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
        requests.add(getRequest(propertyMap));
      }
    }

    Set<ExtensionLinkEntity> entities = new HashSet<ExtensionLinkEntity>();

    for (ExtensionLinkRequest extensionLinkRequest : requests) {
      verifyStackAndExtensionExist(extensionLinkRequest);
      entities.addAll(dao.find(extensionLinkRequest));
    }

    for (ExtensionLinkEntity entity : entities) {
      Resource resource = new ResourceImpl(Resource.Type.ExtensionLink);
      setResourceProperty(resource, LINK_ID_PROPERTY_ID,
		  entity.getLinkId(), requestedIds);
      setResourceProperty(resource, STACK_NAME_PROPERTY_ID,
	  entity.getStack().getStackName(), requestedIds);
      setResourceProperty(resource, STACK_VERSION_PROPERTY_ID,
          entity.getStack().getStackVersion(), requestedIds);
      setResourceProperty(resource, EXTENSION_NAME_PROPERTY_ID,
          entity.getExtension().getExtensionName(), requestedIds);
      setResourceProperty(resource, EXTENSION_VERSION_PROPERTY_ID,
          entity.getExtension().getExtensionVersion(), requestedIds);

      resources.add(resource);
    }
    return resources;
  }

  /**
   * Updating a link does not modify any link fields; it only triggers a
   * re-read of the stacks/extensions directories so the latest on-disk
   * definitions are picked up.
   */
  @Override
  public RequestStatus updateResources(Request request, Predicate predicate)
        throws SystemException, UnsupportedPropertyException,
        NoSuchResourceException, NoSuchParentResourceException {

    //Need to reread the stacks/extensions directories so the latest information is available
    try {
      getManagementController().updateStacks();
    } catch (AmbariException e) {
      throw new SystemException(e.getMessage(), e);
    }

    notifyUpdate(Resource.Type.ExtensionLink, request, predicate);
    return getRequestStatus(null);
  }

  /**
   * Verifies that the stack and/or extension named in the request exist,
   * translating a metadata lookup failure into NoSuchParentResourceException.
   * Either pair (stack or extension) is only checked when both its name and
   * version are present.
   */
  private void verifyStackAndExtensionExist(ExtensionLinkRequest request) throws NoSuchParentResourceException {
    try {
      if (request.getStackName() != null && request.getStackVersion() != null) {
        getManagementController().getAmbariMetaInfo().getStack(request.getStackName(), request.getStackVersion());
      }
      if (request.getExtensionName() != null && request.getExtensionVersion() != null) {
        getManagementController().getAmbariMetaInfo().getExtension(request.getExtensionName(), request.getExtensionVersion());
      }
    }
    catch (AmbariException ambariException) {
      throw new NoSuchParentResourceException(ambariException.getMessage());
    }
  }

  /**
   * Builds an ExtensionLinkRequest from a property map; absent keys yield
   * null fields.
   */
  private ExtensionLinkRequest getRequest(Map<String, Object> properties) {
    return new ExtensionLinkRequest(
        (String) properties.get(LINK_ID_PROPERTY_ID),
        (String) properties.get(STACK_NAME_PROPERTY_ID),
        (String) properties.get(STACK_VERSION_PROPERTY_ID),
        (String) properties.get(EXTENSION_NAME_PROPERTY_ID),
        (String) properties.get(EXTENSION_VERSION_PROPERTY_ID));
  }

  /**
   * Converts an entity back into a request object; returns null for a null
   * entity.
   */
  // NOTE(review): currently has no callers within this class.
  private ExtensionLinkRequest createExtensionLinkRequest(ExtensionLinkEntity entity) {
    if (entity == null) {
      return null;
    }

    return new ExtensionLinkRequest(String.valueOf(entity.getLinkId()),
        entity.getStack().getStackName(),
        entity.getStack().getStackVersion(),
        entity.getExtension().getExtensionName(),
        entity.getExtension().getExtensionVersion());
  }

  @Override
  protected Set<String> getPKPropertyIds() {
    return pkPropertyIds;
  }
}

+ 121 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionResourceProvider.java

@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.*;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.ExtensionRequest;
+import org.apache.ambari.server.controller.ExtensionResponse;
+import org.apache.ambari.server.controller.spi.*;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionResourceProvider extends ReadOnlyResourceProvider {
+
+  public static final String EXTENSION_NAME_PROPERTY_ID = PropertyHelper
+      .getPropertyId("Extensions", "extension_name");
+
+  private static Set<String> pkPropertyIds = new HashSet<String>(
+      Arrays.asList(new String[] { EXTENSION_NAME_PROPERTY_ID }));
+
+  protected ExtensionResourceProvider(Set<String> propertyIds,
+      Map<Type, String> keyPropertyIds,
+      AmbariManagementController managementController) {
+    super(propertyIds, keyPropertyIds, managementController);
+  }
+
+
+  @Override
+  public Set<Resource> getResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException,
+      NoSuchResourceException, NoSuchParentResourceException {
+
+    final Set<ExtensionRequest> requests = new HashSet<ExtensionRequest>();
+
+    if (predicate == null) {
+      requests.add(getRequest(Collections.<String, Object>emptyMap()));
+    } else {
+      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
+        requests.add(getRequest(propertyMap));
+      }
+    }
+
+    Set<String> requestedIds = getRequestPropertyIds(request, predicate);
+
+    Set<ExtensionResponse> responses = getResources(new Command<Set<ExtensionResponse>>() {
+      @Override
+      public Set<ExtensionResponse> invoke() throws AmbariException {
+        return getManagementController().getExtensions(requests);
+      }
+    });
+
+    Set<Resource> resources = new HashSet<Resource>();
+
+    for (ExtensionResponse response : responses) {
+      Resource resource = new ResourceImpl(Resource.Type.Extension);
+
+      setResourceProperty(resource, EXTENSION_NAME_PROPERTY_ID,
+          response.getExtensionName(), requestedIds);
+
+      resource.setProperty(EXTENSION_NAME_PROPERTY_ID, response.getExtensionName());
+
+      resources.add(resource);
+    }
+
+    return resources;
+  }
+
+  @Override
+  public RequestStatus updateResources(Request request, Predicate predicate)
+    throws SystemException, UnsupportedPropertyException,
+    NoSuchResourceException, NoSuchParentResourceException {
+
+    RequestStatusResponse response = modifyResources(
+      new Command<RequestStatusResponse>() {
+
+      @Override
+      public RequestStatusResponse invoke() throws AmbariException {
+        //return getManagementController().updateExtensions();
+	    //TODO - do we need a separate method
+        return getManagementController().updateStacks();
+      }
+    });
+
+    notifyUpdate(Type.Extension, request, predicate);
+
+    return getRequestStatus(response);
+  }
+
+  private ExtensionRequest getRequest(Map<String, Object> properties) {
+    return new ExtensionRequest((String) properties.get(EXTENSION_NAME_PROPERTY_ID));
+  }
+
+  @Override
+  protected Set<String> getPKPropertyIds() {
+    return pkPropertyIds;
+  }
+}

+ 131 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionVersionResourceProvider.java

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.ambari.server.controller.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.inject.Inject;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.StaticallyInject;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExtensionVersionRequest;
+import org.apache.ambari.server.controller.ExtensionVersionResponse;
+import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
+import org.apache.ambari.server.controller.spi.NoSuchResourceException;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.Resource.Type;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@StaticallyInject
+public class ExtensionVersionResourceProvider extends ReadOnlyResourceProvider {
+
+  public static final String EXTENSION_VERSION_PROPERTY_ID     = PropertyHelper.getPropertyId("Versions", "extension_version");
+  public static final String EXTENSION_NAME_PROPERTY_ID        = PropertyHelper.getPropertyId("Versions", "extension_name");
+  public static final String EXTENSION_VALID_PROPERTY_ID      = PropertyHelper.getPropertyId("Versions", "valid");
+  public static final String EXTENSION_ERROR_SET      = PropertyHelper.getPropertyId("Versions", "extension-errors");
+  public static final String EXTENSION_PARENT_PROPERTY_ID      = PropertyHelper.getPropertyId("Versions", "parent_extension_version");
+
+  private static Set<String> pkPropertyIds = new HashSet<String>(
+      Arrays.asList(new String[] { EXTENSION_NAME_PROPERTY_ID, EXTENSION_VERSION_PROPERTY_ID }));
+
+  protected ExtensionVersionResourceProvider(Set<String> propertyIds,
+      Map<Type, String> keyPropertyIds,
+      AmbariManagementController managementController) {
+    super(propertyIds, keyPropertyIds, managementController);
+  }
+
+  @Override
+  public Set<Resource> getResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException,
+      NoSuchResourceException, NoSuchParentResourceException {
+
+    final Set<ExtensionVersionRequest> requests = new HashSet<ExtensionVersionRequest>();
+
+    if (predicate == null) {
+      requests.add(getRequest(Collections.<String, Object>emptyMap()));
+    } else {
+      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
+        requests.add(getRequest(propertyMap));
+      }
+    }
+
+    Set<String> requestedIds = getRequestPropertyIds(request, predicate);
+
+    Set<ExtensionVersionResponse> responses = getResources(new Command<Set<ExtensionVersionResponse>>() {
+      @Override
+      public Set<ExtensionVersionResponse> invoke() throws AmbariException {
+        return getManagementController().getExtensionVersions(requests);
+      }
+    });
+
+    Set<Resource> resources = new HashSet<Resource>();
+
+    for (ExtensionVersionResponse response : responses) {
+      Resource resource = new ResourceImpl(Resource.Type.ExtensionVersion);
+
+      setResourceProperty(resource, EXTENSION_NAME_PROPERTY_ID,
+          response.getExtensionName(), requestedIds);
+
+      setResourceProperty(resource, EXTENSION_VERSION_PROPERTY_ID,
+          response.getExtensionVersion(), requestedIds);
+
+      setResourceProperty(resource, EXTENSION_VALID_PROPERTY_ID,
+          response.isValid(), requestedIds);
+
+      setResourceProperty(resource, EXTENSION_ERROR_SET,
+          response.getErrors(), requestedIds);
+
+      setResourceProperty(resource, EXTENSION_PARENT_PROPERTY_ID,
+        response.getParentVersion(), requestedIds);
+
+      resources.add(resource);
+    }
+
+    return resources;
+  }
+
+  private ExtensionVersionRequest getRequest(Map<String, Object> properties) {
+    return new ExtensionVersionRequest(
+        (String) properties.get(EXTENSION_NAME_PROPERTY_ID),
+        (String) properties.get(EXTENSION_VERSION_PROPERTY_ID));
+  }
+
+  @Override
+  protected Set<String> getPKPropertyIds() {
+    return pkPropertyIds;
+  }
+
+}

+ 6 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java

@@ -92,6 +92,9 @@ public interface Resource {
     Member,
     Stack,
     StackVersion,
+    ExtensionLink,
+    Extension,
+    ExtensionVersion,
     OperatingSystem,
     Repository,
     StackService,
@@ -210,6 +213,9 @@ public interface Resource {
     public static final Type Member = InternalType.Member.getType();
     public static final Type Stack = InternalType.Stack.getType();
     public static final Type StackVersion = InternalType.StackVersion.getType();
+    public static final Type ExtensionLink = InternalType.ExtensionLink.getType();
+    public static final Type Extension = InternalType.Extension.getType();
+    public static final Type ExtensionVersion = InternalType.ExtensionVersion.getType();
     public static final Type OperatingSystem = InternalType.OperatingSystem.getType();
     public static final Type Repository = InternalType.Repository.getType();
     public static final Type StackService = InternalType.StackService.getType();

+ 168 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java

@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import java.util.List;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.RequiresSession;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
+
+/**
+ * The {@link ExtensionDAO} class is used to manage the persistence and retrieval of
+ * {@link ExtensionEntity} instances.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Singleton
+public class ExtensionDAO {
+
+  /**
+   * JPA entity manager
+   */
+  @Inject
+  private Provider<EntityManager> entityManagerProvider;
+
+  /**
+   * DAO utilities for dealing mostly with {@link TypedQuery} results.
+   */
+  @Inject
+  private DaoUtils daoUtils;
+
+  /**
+   * Gets an extension with the specified ID.
+   *
+   * @param extensionId
+   *          the ID of the extension to retrieve.
+   * @return the extension or {@code null} if none exists.
+   */
+  @RequiresSession
+  public ExtensionEntity findById(long extensionId) {
+    return entityManagerProvider.get().find(ExtensionEntity.class, extensionId);
+  }
+
+  /**
+   * Gets all of the defined extensions.
+   *
+   * @return all of the extensions loaded from resources or an empty list (never
+   *         {@code null}).
+   */
+  @RequiresSession
+  public List<ExtensionEntity> findAll() {
+    TypedQuery<ExtensionEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionEntity.findAll", ExtensionEntity.class);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
+   * Gets the extension that matches the specified name and version.
+   *
+   * @param extensionName
+   *          the name of the extension to look up.
+   * @param extensionVersion
+   *          the version of the extension to look up.
+   * @return the extension matching the specified name and version or {@code null}
+   *         if none.
+   */
+  @RequiresSession
+  public ExtensionEntity find(String extensionName, String extensionVersion) {
+    TypedQuery<ExtensionEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionEntity.findByNameAndVersion", ExtensionEntity.class);
+
+    query.setParameter("extensionName", extensionName);
+    query.setParameter("extensionVersion", extensionVersion);
+
+    return daoUtils.selectOne(query);
+  }
+
+  /**
+   * Persists a new extension instance.
+   *
+   * @param extension
+   *          the extension to persist (not {@code null}).
+   */
+  @Transactional
+  public void create(ExtensionEntity extension)
+      throws AmbariException {
+    EntityManager entityManager = entityManagerProvider.get();
+    entityManager.persist(extension);
+  }
+
+  /**
+   * Refresh the state of the extension instance from the database.
+   *
+   * @param extension
+   *          the extension to refresh (not {@code null}).
+   */
+  @Transactional
+  public void refresh(ExtensionEntity extension) {
+    entityManagerProvider.get().refresh(extension);
+  }
+
+  /**
+   * Merge the specified extension with the existing extension in the database.
+   *
+   * @param extension
+   *          the extension to merge (not {@code null}).
+   * @return the updated extension with merged content (never {@code null}).
+   */
+  @Transactional
+  public ExtensionEntity merge(ExtensionEntity extension) {
+    return entityManagerProvider.get().merge(extension);
+  }
+
+  /**
+   * Creates or updates the specified entity. This method will check
+   * {@link ExtensionEntity#getExtensionId()} in order to determine whether the entity
+   * should be created or merged.
+   *
+   * @param extension
+   *          the extension to create or update (not {@code null}).
+   */
+  public void createOrUpdate(ExtensionEntity extension)
+      throws AmbariException {
+    if (null == extension.getExtensionId()) {
+      create(extension);
+    } else {
+      merge(extension);
+    }
+  }
+
+  /**
+   * Removes the specified extension. The entity is re-read by its ID before
+   * removal so that detached instances may be passed in; if no row with that
+   * ID exists this is a no-op.
+   *
+   * @param extension
+   *          the extension to remove.
+   */
+  @Transactional
+  public void remove(ExtensionEntity extension) {
+    EntityManager entityManager = entityManagerProvider.get();
+    extension = findById(extension.getExtensionId());
+    if (null != extension) {
+      entityManager.remove(extension);
+    }
+  }
+}

+ 240 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java

@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.dao;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.ExtensionLinkRequest;
+import org.apache.ambari.server.orm.RequiresSession;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+import com.google.inject.persist.Transactional;
+
+/**
+ * The {@link ExtensionLinkDAO} class is used to manage the persistence and retrieval of
+ * {@link ExtensionLinkEntity} instances.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Singleton
+public class ExtensionLinkDAO {
+
+  /**
+   * JPA entity manager
+   */
+  @Inject
+  private Provider<EntityManager> entityManagerProvider;
+
+  /**
+   * DAO utilities for dealing mostly with {@link TypedQuery} results.
+   */
+  @Inject
+  private DaoUtils daoUtils;
+
+
+  /**
+   * Gets the extension links matching the criteria carried by the request.
+   * Lookup precedence: link ID, then stack + extension name/version, then
+   * stack only, then extension only; with no criteria all links are returned.
+   *
+   * @param request
+   *          the request describing which links to return (not {@code null}).
+   * @return the matching extension links; an empty list (never a list
+   *         containing {@code null}) when nothing matches.
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> find(ExtensionLinkRequest request) {
+    if (request.getLinkId() != null) {
+      // Primary-key lookup; guard so an unknown ID yields an empty list
+      // rather than a list containing null.
+      ExtensionLinkEntity entity = findById(Long.parseLong(request.getLinkId()));
+      List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
+      if (entity != null) {
+        list.add(entity);
+      }
+      return list;
+    }
+
+    // BUG FIX: previously all four values were read from getStackName(),
+    // so every filtered lookup below used the stack name for each field.
+    String stackName = request.getStackName();
+    String stackVersion = request.getStackVersion();
+    String extensionName = request.getExtensionName();
+    String extensionVersion = request.getExtensionVersion();
+
+    if (stackName != null && stackVersion != null) {
+      if (extensionName != null && extensionVersion != null) {
+        ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
+        List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
+        if (entity != null) {
+          list.add(entity);
+        }
+        return list;
+      }
+      return findByStack(stackName, stackVersion);
+    }
+    if (extensionName != null && extensionVersion != null) {
+      return findByExtension(extensionName, extensionVersion);
+    }
+
+    return findAll();
+  }
+
+  /**
+   * Gets an extension link with the specified ID.
+   *
+   * @param linkId
+   *          the ID of the extension link to retrieve.
+   * @return the extension or {@code null} if none exists.
+   */
+  @RequiresSession
+  public ExtensionLinkEntity findById(long linkId) {
+    return entityManagerProvider.get().find(ExtensionLinkEntity.class, linkId);
+  }
+
+  /**
+   * Gets all of the defined extension links.
+   *
+   * @return all of the extension links loaded from resources or an empty list (never
+   *         {@code null}).
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> findAll() {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findAll", ExtensionLinkEntity.class);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
+   * Gets the extension links that match the specified extension name and version.
+   *
+   * @param extensionName
+   *          the name of the extension.
+   * @param extensionVersion
+   *          the version of the extension.
+   * @return the extension links matching the specified extension name and version if any.
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> findByExtension(String extensionName, String extensionVersion) {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findByExtension", ExtensionLinkEntity.class);
+
+    query.setParameter("extensionName", extensionName);
+    query.setParameter("extensionVersion", extensionVersion);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
+   * Gets the extension links that match the specified stack name and version.
+   *
+   * @param stackName
+   *          the name of the stack.
+   * @param stackVersion
+   *          the version of the stack.
+   * @return the extension links matching the specified stack name and version if any.
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> findByStack(String stackName, String stackVersion) {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findByStack", ExtensionLinkEntity.class);
+
+    query.setParameter("stackName", stackName);
+    query.setParameter("stackVersion", stackVersion);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
+   * Gets the extension link that match the specified stack name, stack version, extension name and extension version.
+   *
+   * @return the extension link matching the specified stack name, stack version, extension name and extension version if any.
+   */
+  @RequiresSession
+  public ExtensionLinkEntity findByStackAndExtension(String stackName, String stackVersion, String extensionName, String extensionVersion) {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findByStackAndExtension", ExtensionLinkEntity.class);
+
+    query.setParameter("stackName", stackName);
+    query.setParameter("stackVersion", stackVersion);
+    query.setParameter("extensionName", extensionName);
+    query.setParameter("extensionVersion", extensionVersion);
+
+    return daoUtils.selectOne(query);
+  }
+
+  /**
+   * Persists a new extension link instance.
+   *
+   * @param link
+   *          the extension link to persist (not {@code null}).
+   */
+  @Transactional
+  public void create(ExtensionLinkEntity link)
+      throws AmbariException {
+    EntityManager entityManager = entityManagerProvider.get();
+    entityManager.persist(link);
+  }
+
+  /**
+   * Refresh the state of the extension instance from the database.
+   *
+   * @param link
+   *          the extension link to refresh (not {@code null}).
+   */
+  @Transactional
+  public void refresh(ExtensionLinkEntity link) {
+    entityManagerProvider.get().refresh(link);
+  }
+
+  /**
+   * Merge the specified extension link with the existing extension link in the database.
+   *
+   * @param link
+   *          the extension link to merge (not {@code null}).
+   * @return the updated extension link with merged content (never {@code null}).
+   */
+  @Transactional
+  public ExtensionLinkEntity merge(ExtensionLinkEntity link) {
+    return entityManagerProvider.get().merge(link);
+  }
+
+  /**
+   * Creates or updates the specified entity. This method will check
+   * {@link ExtensionLinkEntity#getLinkId()} in order to determine whether the entity
+   * should be created or merged.
+   *
+   * @param link
+   *          the link to create or update (not {@code null}).
+   */
+  public void createOrUpdate(ExtensionLinkEntity link)
+      throws AmbariException {
+    if (null == link.getLinkId()) {
+      create(link);
+    } else {
+      merge(link);
+    }
+  }
+
+  /**
+   * Removes the specified extension link. The entity is re-read by its ID
+   * before removal so detached instances may be passed in; if no row with
+   * that ID exists this is a no-op.
+   *
+   * @param link
+   *          the extension link to remove.
+   */
+  @Transactional
+  public void remove(ExtensionLinkEntity link) {
+    EntityManager entityManager = entityManagerProvider.get();
+    link = findById(link.getLinkId());
+    if (null != link) {
+      entityManager.remove(link);
+    }
+  }
+}

+ 156 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionEntity.java

@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
+import javax.persistence.Table;
+import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
+
+/**
+ * JPA entity modeling a single stack extension (name + version pair).
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Entity
+@Table(name = "extension", uniqueConstraints = @UniqueConstraint(columnNames = {
+    "extension_name", "extension_version" }))
+@TableGenerator(name = "extension_id_generator", table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value", pkColumnValue = "extension_id_seq", initialValue = 0)
+@NamedQueries({
+    @NamedQuery(name = "ExtensionEntity.findAll", query = "SELECT extension FROM ExtensionEntity extension"),
+    @NamedQuery(name = "ExtensionEntity.findByNameAndVersion", query = "SELECT extension FROM ExtensionEntity extension WHERE extension.extensionName = :extensionName AND extension.extensionVersion = :extensionVersion") })
+public class ExtensionEntity {
+
+  @Id
+  @GeneratedValue(strategy = GenerationType.TABLE, generator = "extension_id_generator")
+  @Column(name = "extension_id", nullable = false, updatable = false)
+  private Long extensionId;
+
+  @Column(name = "extension_name", length = 255, nullable = false)
+  private String extensionName;
+
+  @Column(name = "extension_version", length = 255, nullable = false)
+  private String extensionVersion;
+
+  /**
+   * Default constructor required by JPA.
+   */
+  public ExtensionEntity() {
+  }
+
+  /**
+   * @return the unique identifier of this extension, or {@code null} if the
+   *         entity has not yet been persisted.
+   */
+  public Long getExtensionId() {
+    return extensionId;
+  }
+
+  /**
+   * @return the name of the extension.
+   */
+  public String getExtensionName() {
+    return extensionName;
+  }
+
+  /**
+   * @param extensionName
+   *          the extension name (not {@code null}).
+   */
+  public void setExtensionName(String extensionName) {
+    this.extensionName = extensionName;
+  }
+
+  /**
+   * @return the version of the extension.
+   */
+  public String getExtensionVersion() {
+    return extensionVersion;
+  }
+
+  /**
+   * @param extensionVersion
+   *          the extension version (not {@code null}).
+   */
+  public void setExtensionVersion(String extensionVersion) {
+    this.extensionVersion = extensionVersion;
+  }
+
+  /**
+   * Equality is based solely on the database identifier.
+   */
+  @Override
+  public boolean equals(Object object) {
+    if (object == this) {
+      return true;
+    }
+    if (object == null || object.getClass() != getClass()) {
+      return false;
+    }
+    ExtensionEntity other = (ExtensionEntity) object;
+    return extensionId == null ? other.extensionId == null : extensionId.equals(other.extensionId);
+  }
+
+  /**
+   * Derived from the database identifier, consistent with {@link #equals(Object)}.
+   */
+  @Override
+  public int hashCode() {
+    return extensionId == null ? 0 : extensionId.hashCode();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "{id=" + extensionId
+        + ", name=" + extensionName
+        + ", version=" + extensionVersion + "}";
+  }
+}

+ 139 - 0
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java

@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm.entities;
+
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.GeneratedValue;
+import javax.persistence.GenerationType;
+import javax.persistence.Id;
+import javax.persistence.JoinColumn;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
+import javax.persistence.OneToOne;
+import javax.persistence.Table;
+import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
+
+/**
+ * The {@link ExtensionLinkEntity} class is used to model the extensions linked to the stack.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@Entity
+@Table(name = "extensionlink", uniqueConstraints = @UniqueConstraint(columnNames = {
+    "stack_id", "extension_id" }))
+@TableGenerator(name = "link_id_generator", table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value", pkColumnValue = "link_id_seq", initialValue = 0)
+@NamedQueries({
+    @NamedQuery(name = "ExtensionLinkEntity.findAll", query = "SELECT link FROM ExtensionLinkEntity link"),
+    @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion"),
+    @NamedQuery(name = "ExtensionLinkEntity.findByStack", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion"),
+    @NamedQuery(name = "ExtensionLinkEntity.findByExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion") })
+public class ExtensionLinkEntity {
+
+  @Id
+  @GeneratedValue(strategy = GenerationType.TABLE, generator = "link_id_generator")
+  @Column(name = "link_id", nullable = false, updatable = false)
+  private Long linkId;
+
+  @OneToOne
+  @JoinColumn(name = "stack_id", unique = false, nullable = false, insertable = true, updatable = false)
+  private StackEntity stack;
+
+  @OneToOne
+  @JoinColumn(name = "extension_id", unique = false, nullable = false, insertable = true, updatable = false)
+  private ExtensionEntity extension;
+
+  /**
+   * Default constructor required by JPA.
+   */
+  public ExtensionLinkEntity() {
+  }
+
+  /**
+   * @return the unique identifier of this link, or {@code null} if the entity
+   *         has not yet been persisted.
+   */
+  public Long getLinkId() {
+    return linkId;
+  }
+
+  public void setLinkId(Long linkId) {
+    this.linkId = linkId;
+  }
+
+  /**
+   * @return the stack side of the link.
+   */
+  public StackEntity getStack() {
+    return stack;
+  }
+
+  public void setStack(StackEntity stack) {
+    this.stack = stack;
+  }
+
+  /**
+   * @return the extension side of the link.
+   */
+  public ExtensionEntity getExtension() {
+    return extension;
+  }
+
+  public void setExtension(ExtensionEntity extension) {
+    this.extension = extension;
+  }
+
+  /**
+   * Equality is based solely on the database identifier.
+   */
+  @Override
+  public boolean equals(Object object) {
+    if (this == object) {
+      return true;
+    }
+
+    if (object == null || getClass() != object.getClass()) {
+      return false;
+    }
+
+    ExtensionLinkEntity that = (ExtensionLinkEntity) object;
+
+    if (linkId != null ? !linkId.equals(that.linkId) : that.linkId != null) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Derived from the database identifier, consistent with {@link #equals(Object)}.
+   */
+  @Override
+  public int hashCode() {
+    int result = (null != linkId) ? linkId.hashCode() : 0;
+    return result;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append(getClass().getSimpleName());
+    buffer.append("{");
+    buffer.append("linkId=").append(linkId);
+    // Null-safe: the stack/extension references are unset on a freshly
+    // constructed entity, and toString() must never throw.
+    buffer.append(", stackId=").append(stack == null ? null : stack.getStackId());
+    buffer.append(", extensionId=").append(extension == null ? null : extension.getExtensionId());
+    buffer.append("}");
+    return buffer.toString();
+  }
+}

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/stack/BaseModule.java

@@ -60,7 +60,7 @@ public abstract class BaseModule<T, I> implements StackDefinitionModule<T, I> {
    * @return collection of the merged modules
    */
   protected <T extends StackDefinitionModule<T, ?>> Collection<T> mergeChildModules(
-      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, T> modules, Map<String, T> parentModules)
+      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions, Map<String, T> modules, Map<String, T> parentModules)
         throws AmbariException {
     Set<String> addedModules = new HashSet<String>();
     Collection<T> mergedModules = new HashSet<T>();
@@ -70,7 +70,7 @@ public abstract class BaseModule<T, I> implements StackDefinitionModule<T, I> {
       addedModules.add(id);
       if (!module.isDeleted()) {
         if (parentModules.containsKey(id)) {
-          module.resolve(parentModules.get(id), allStacks, commonServices);
+          module.resolve(parentModules.get(id), allStacks, commonServices, extensions);
         }
         mergedModules.add(module);
       }

+ 2 - 5
ambari-server/src/main/java/org/apache/ambari/server/stack/CommonServiceDirectory.java

@@ -62,15 +62,13 @@ public class CommonServiceDirectory extends ServiceDirectory {
 
   @Override
   /**
-   * Parse common service directory
+   * Calculate the common service directories
    * packageDir Format: common-services/<serviceName>/<serviceVersion>/package
    * Example:
    *  directory: "/var/lib/ambari-server/resources/common-services/HDFS/1.0"
    *  packageDir: "common-services/HDFS/1.0/package"
-   *
-   * @throws AmbariException
    */
-  protected void parsePath() throws AmbariException {
+  protected void calculateDirectories() {
     File serviceVersionDir = new File(getAbsolutePath());
     File serviceDir = serviceVersionDir.getParentFile();
 
@@ -95,6 +93,5 @@ public class CommonServiceDirectory extends ServiceDirectory {
       LOG.debug(String.format("Service upgrades folder %s for common service %s does not exist.",
           absUpgradesDir, serviceId ));
     }
-    parseMetaInfoFile();
   }
 }

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/ComponentModule.java

@@ -58,7 +58,8 @@ public class ComponentModule extends BaseModule<ComponentModule, ComponentInfo>
   }
 
   @Override
-  public void resolve(ComponentModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) {
+  public void resolve(ComponentModule parent, Map<String, StackModule> allStacks,
+	    Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) {
     if (parent != null) {
       ComponentInfo parentInfo = parent.getModuleInfo();
       if (!parent.isValid()) {

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationModule.java

@@ -82,7 +82,8 @@ public class ConfigurationModule extends BaseModule<ConfigurationModule, Configu
   }
 
   @Override
-  public void resolve(ConfigurationModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
+  public void resolve(ConfigurationModule parent, Map<String, StackModule> allStacks,
+	    Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
     // merge properties also removes deleted props so should be called even if extension is disabled
     if (parent != null) {
       if (parent.info != null) {

+ 196 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionDirectory.java

@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.state.stack.RepositoryXml;
+import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
+import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.commons.io.FilenameUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.xml.bind.JAXBException;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+/**
+ * Encapsulates IO operations on an extension definition directory.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+//todo: Normalize all path return values.
+//todo: Currently some are relative and some are absolute.
+//todo: Current values were dictated by the ExtensionInfo expectations.
+public class ExtensionDirectory extends StackDefinitionDirectory {
+
+  /**
+   * collection of service directories
+   */
+  private Collection<ServiceDirectory> serviceDirectories;
+
+  /**
+   * metainfo file representation
+   */
+  private ExtensionMetainfoXml metaInfoXml;
+
+  /**
+   * file unmarshaller
+   */
+  ModuleFileUnmarshaller unmarshaller = new ModuleFileUnmarshaller();
+
+  /**
+   * extensions directory name
+   */
+  public final static String EXTENSIONS_FOLDER_NAME = "extensions";
+
+  /**
+   * metainfo file name
+   */
+  private static final String EXTENSION_METAINFO_FILE_NAME = "metainfo.xml";
+
+  /**
+   * logger instance
+   */
+  private final static Logger LOG = LoggerFactory.getLogger(ExtensionDirectory.class);
+
+
+  /**
+   * Constructor.
+   *
+   * @param directory  extension directory
+   * @throws AmbariException if unable to parse the stack directory
+   */
+  public ExtensionDirectory(String directory) throws AmbariException {
+    super(directory);
+    parsePath();
+  }
+
+  /**
+   * Obtain the extension directory name.
+   *
+   * @return extension directory name
+   */
+  public String getExtensionDirName() {
+    return getDirectory().getParentFile().getName();
+  }
+
+  /**
+   * Obtain the object representation of the extension metainfo.xml file.
+   *
+   * @return object representation of the extension metainfo.xml file
+   */
+  public ExtensionMetainfoXml getMetaInfoFile() {
+    return metaInfoXml;
+  }
+
+  /**
+   * Obtain a collection of all service directories.
+   *
+   * @return collection of all service directories
+   */
+  public Collection<ServiceDirectory> getServiceDirectories() {
+    return serviceDirectories;
+  }
+
+  /**
+   * Parse the extension directory.
+   *
+   * @throws AmbariException if unable to parse the directory
+   */
+  private void parsePath() throws AmbariException {
+    Collection<String> subDirs = Arrays.asList(directory.list());
+    parseServiceDirectories(subDirs);
+    parseMetaInfoFile();
+  }
+
+  /**
+   * Parse the extension metainfo file.
+   *
+   * @throws AmbariException if unable to parse the extension metainfo file
+   */
+  private void parseMetaInfoFile() throws AmbariException {
+    File extensionMetaInfoFile = new File(getAbsolutePath()
+        + File.separator + EXTENSION_METAINFO_FILE_NAME);
+
+    //todo: is it ok for this file not to exist?
+    if (extensionMetaInfoFile.exists()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Reading extension version metainfo from file " + extensionMetaInfoFile.getAbsolutePath());
+      }
+
+      try {
+        metaInfoXml = unmarshaller.unmarshal(ExtensionMetainfoXml.class, extensionMetaInfoFile);
+      } catch (JAXBException e) {
+        metaInfoXml = new ExtensionMetainfoXml();
+        metaInfoXml.setValid(false);
+        metaInfoXml.addError("Unable to parse extension metainfo.xml file at location: " +
+            extensionMetaInfoFile.getAbsolutePath());
+      }
+    }
+  }
+
+  /**
+   * Parse the extension's service directories.
+   * @param subDirs  extension sub directories
+   * @throws AmbariException  if unable to parse the service directories
+   */
+  private void parseServiceDirectories(Collection<String> subDirs) throws AmbariException {
+    Collection<ServiceDirectory> dirs = new HashSet<ServiceDirectory>();
+
+    if (subDirs.contains(ServiceDirectory.SERVICES_FOLDER_NAME)) {
+      String servicesDir = getAbsolutePath() + File.separator + ServiceDirectory.SERVICES_FOLDER_NAME;
+      File baseServiceDir = new File(servicesDir);
+      File[] serviceFolders = baseServiceDir.listFiles(AmbariMetaInfo.FILENAME_FILTER);
+      if (serviceFolders != null) {
+        for (File d : serviceFolders) {
+          if (d.isDirectory()) {
+            try {
+              dirs.add(new StackServiceDirectory(d.getAbsolutePath()));
+            } catch (AmbariException e) {
+              //todo: this seems as though we should propagate this exception
+              //todo: eating it now to keep backwards compatibility
+              LOG.warn(String.format("Unable to parse extension definition service at '%s'.  Ignoring service. : %s",
+                  d.getAbsolutePath(), e.toString()));
+            }
+          }
+        }
+      }
+    }
+
+    if (dirs.isEmpty()) {
+      //todo: what does it mean for an extension to have no services?
+      LOG.info("The extension defined at '" + getAbsolutePath() + "' contains no services");
+    }
+    serviceDirectories = dirs;
+  }
+}

+ 167 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java

@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionHelper {
+
+  public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    validateNotRequiredExtension(stack, extension);
+    validateServicesNotInstalled(clusters, stack, extension);
+  }
+
+  private static void validateServicesNotInstalled(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    for (Cluster cluster : clusters.getClusters().values()) {
+      for (ServiceInfo service : extension.getServices()) {
+        try {
+          if (service != null && cluster.getService(service.getName()) != null) {
+            String message = "Extension service is still installed"
+                        + ", stackName=" + stack.getName()
+                        + ", stackVersion=" + stack.getVersion()
+                        + ", service=" + service.getName()
+                        + ", extensionName=" + extension.getName()
+                        + ", extensionVersion=" + extension.getVersion();
+
+            throw new AmbariException(message);
+          }
+        }
+        catch (ServiceNotFoundException e) {
+          //Eat the exception
+        }
+      }
+    }
+  }
+
+  public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    validateSupportedStackVersion(stack, extension);
+    validateServiceDuplication(stack, extension);
+    validateRequiredExtensions(stack, extension);
+  }
+
+  private static void validateSupportedStackVersion(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    for (ExtensionMetainfoXml.Stack validStack : extension.getStacks()) {
+      if (validStack.getName().equals(stack.getName())) {
+        String minStackVersion = validStack.getVersion();
+        if (VersionUtils.compareVersions(stack.getVersion(), minStackVersion) >= 0) {
+          //Found a supported stack version
+          return;
+        }
+      }
+    }
+
+    String message = "Stack is not supported by extension"
+		+ ", stackName=" + stack.getName()
+		+ ", stackVersion=" + stack.getVersion()
+		+ ", extensionName=" + extension.getName()
+		+ ", extensionVersion=" + extension.getVersion();
+
+    throw new AmbariException(message);
+  }
+
+  private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    for (ServiceInfo service : extension.getServices()) {
+      if (service != null) {
+        ServiceInfo stackService = null;
+        try {
+          stackService = stack.getService(service.getName());
+        }
+        catch (Exception e) {
+          //Eat the exception
+        }
+        if (stackService != null) {
+          String message = "Existing service is included in extension"
+                      + ", stackName=" + stack.getName()
+                      + ", stackVersion=" + stack.getVersion()
+                      + ", service=" + service.getName()
+                      + ", extensionName=" + extension.getName()
+                      + ", extensionVersion=" + extension.getVersion();
+
+          throw new AmbariException(message);
+        }
+      }
+    }
+  }
+
+  private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
+      if (requiredExtension != null) {
+        String message = "Stack has not linked required extension"
+                    + ", stackName=" + stack.getName()
+                    + ", stackVersion=" + stack.getVersion()
+                    + ", extensionName=" + extension.getName()
+                    + ", extensionVersion=" + extension.getVersion()
+                    + ", requiredExtensionName=" + requiredExtension.getName()
+                    + ", requiredExtensionVersion=" + requiredExtension.getVersion();
+        try {
+          ExtensionInfo stackExtension = stack.getExtension(requiredExtension.getName());
+          if (stackExtension != null) {
+            String version = requiredExtension.getVersion();
+            if (version.endsWith("*")) {
+              version = version.substring(0, version.length() - 1);
+              if (!stackExtension.getVersion().startsWith(version)) {
+                throw new AmbariException(message);
+              }
+            }
+            else if (!stackExtension.getVersion().equals(version)) {
+              throw new AmbariException(message);
+            }
+          }
+        }
+        catch (Exception e) {
+          throw new AmbariException(message, e);
+        }
+      }
+    }
+  }
+
+  private static void validateNotRequiredExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    for (ExtensionInfo stackExtension : stack.getExtensions()) {
+      if (stackExtension != null) {
+        for (ExtensionMetainfoXml.Extension requiredExtension : stackExtension.getExtensions()) {
+          if (requiredExtension != null && requiredExtension.getName().equals(extension.getName())) {
+            String message = "Stack extension is required by extension"
+                        + ", stackName=" + stack.getName()
+                        + ", stackVersion=" + stack.getVersion()
+                        + ", extensionName=" + extension.getName()
+                        + ", extensionVersion=" + extension.getVersion()
+                        + ", dependentExtensionName=" + stackExtension.getName()
+                        + ", dependentExtensionVersion=" + stackExtension.getVersion();
+
+            throw new AmbariException(message);
+          }
+        }
+      }
+    }
+  }
+
+}

+ 540 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java

@@ -0,0 +1,540 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.state.stack.RepositoryXml;
+import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extension module which provides all functionality related to parsing and fully
+ * resolving extensions from the extension definition.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ *
+ * <p>
+ * Each extension node is identified by name and version, contains service
+ * child nodes and may extend a single parent extension.
+ * </p>
+ *
+ * <p>
+ * Resolution of an extension is a depth first traversal up the inheritance chain where each extension node
+ * calls resolve on its parent before resolving itself.  After the parent resolve call returns, all
+ * ancestors in the inheritance tree are fully resolved.  The act of resolving the extension includes
+ * resolution of the services children of the extension as well as merging of other extension
+ * state with the fully resolved parent.
+ * </p>
+ *
+ * <p>
+ * Because a service may explicitly extend another service in an extension outside of the inheritance tree,
+ * service child node resolution involves a depth first resolution of the extension associated with the
+ * service's explicit parent, if any.  This follows the same steps defined above for extension node
+ * resolution.  After the service's explicit parent is fully resolved, the service's state is merged
+ * with its parent.
+ * </p>
+ *
+ * <p>
+ * If a cycle in an extension definition is detected, an exception is thrown from the resolve call.
+ * </p>
+ *
+ */
+public class ExtensionModule extends BaseModule<ExtensionModule, ExtensionInfo> implements Validable {
+
+  /**
+   * Context which provides access to external functionality
+   */
+  private StackContext stackContext;
+
+  /**
+   * Map of child configuration modules keyed by configuration type
+   */
+  private Map<String, ConfigurationModule> configurationModules = new HashMap<String, ConfigurationModule>();
+
+  /**
+   * Map of child service modules keyed by service name
+   */
+  private Map<String, ServiceModule> serviceModules = new HashMap<String, ServiceModule>();
+
+  /**
+   * Corresponding ExtensionInfo instance
+   */
+  private ExtensionInfo extensionInfo;
+
+  /**
+   * Encapsulates IO operations on extension directory
+   */
+  private ExtensionDirectory extensionDirectory;
+
+  /**
+   * Extension id which is in the form extensionName:extensionVersion
+   */
+  private String id;
+
+  /**
+   * validity flag
+   */
+  protected boolean valid = true;
+
+  /**
+   * Logger
+   */
+  private final static Logger LOG = LoggerFactory.getLogger(ExtensionModule.class);
+
+  /**
+   * Constructor.
+   * @param extensionDirectory  represents extension directory
+   * @param stackContext        general stack context
+   */
+  public ExtensionModule(ExtensionDirectory extensionDirectory, StackContext stackContext) {
+    this.extensionDirectory = extensionDirectory;
+    this.stackContext = stackContext;
+    this.extensionInfo = new ExtensionInfo();
+    populateExtensionInfo();
+  }
+
+  public Map<String, ServiceModule> getServiceModules() {
+	  return serviceModules;
+  }
+
+  /**
+   * Fully resolve the extension. See extension resolution description in the class documentation.
+   * If the extension has a parent, this extension will be merged against its fully resolved parent
+   * if one is specified. Merging applies to all extension state including child service and
+   * configuration modules.  Services may extend a service in another version in the
+   * same extension hierarchy or may explicitly extend a service in a different
+   * hierarchy.
+   *
+   * @param parentModule   not used.  Each extension determines its own parent since extensions don't
+   *                       have containing modules
+   * @param allStacks      all stacks modules contained in the stack definition
+   * @param commonServices all common services
+   * @param extensions     all extensions
+   *
+   * @throws AmbariException if an exception occurs during extension resolution
+   */
+  @Override
+  public void resolve(
+      ExtensionModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+    moduleState = ModuleState.VISITED;
+    checkExtensionName(allStacks);
+
+    String parentVersion = extensionInfo.getParentExtensionVersion();
+    mergeServicesWithExplicitParent(allStacks, commonServices, extensions);
+    // merge with parent version of same extension definition
+    if (parentVersion != null) {
+      mergeExtensionWithParent(parentVersion, allStacks, commonServices, extensions);
+    }
+    moduleState = ModuleState.RESOLVED;
+  }
+
+  @Override
+  public ExtensionInfo getModuleInfo() {
+    return extensionInfo;
+  }
+
+  @Override
+  public boolean isDeleted() {
+    return false;
+  }
+
+  @Override
+  public String getId() {
+    return id;
+  }
+
+  @Override
+  public void finalizeModule() {
+    finalizeChildModules(serviceModules.values());
+    finalizeChildModules(configurationModules.values());
+  }
+
+  /**
+   * Get the associated extension directory.
+   *
+   * @return associated extension directory
+   */
+  public ExtensionDirectory getExtensionDirectory() {
+    return extensionDirectory;
+  }
+
+  /**
+   * Merge the extension with its parent.
+   *
+   * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
+   * @param parentVersion  version of the extension's parent
+   *
+   * @throws AmbariException if an exception occurs merging with the parent
+   */
+  private void mergeExtensionWithParent(
+      String parentVersion, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+
+    String parentExtensionKey = extensionInfo.getName() + StackManager.PATH_DELIMITER + parentVersion;
+    ExtensionModule parentExtension = extensions.get(parentExtensionKey);
+
+    if (parentExtension == null) {
+      throw new AmbariException("Extension '" + extensionInfo.getName() + ":" + extensionInfo.getVersion() +
+          "' specifies a parent that doesn't exist");
+    }
+
+    resolveExtension(parentExtension, allStacks, commonServices, extensions);
+    /*mergeConfigurations(parentStack, allStacks, commonServices);
+    mergeRoleCommandOrder(parentStack);*/
+
+    /*if (extensionInfo.getStackHooksFolder() == null) {
+      extensionInfo.setStackHooksFolder(parentStack.getModuleInfo().getStackHooksFolder());
+    }
+
+    if (extensionInfo.getKerberosDescriptorFileLocation() == null) {
+      extensionInfo.setKerberosDescriptorFileLocation(parentStack.getModuleInfo().getKerberosDescriptorFileLocation());
+    }
+
+    if (extensionInfo.getWidgetsDescriptorFileLocation() == null) {
+      extensionInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
+    }*/
+
+    mergeServicesWithParent(parentExtension, allStacks, commonServices, extensions);
+  }
+
+  /**
+   * Merge child services with parent extension.
+   *
+   * @param parentExtension    parent extension module
+   * @param allStacks          all stacks in stack definition
+   * @param commonServices     all common services
+   * @param extensions         all extensions
+   *
+   * @throws AmbariException if an exception occurs merging the child services with the parent extension
+   */
+  private void mergeServicesWithParent(
+      ExtensionModule parentExtension, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+    extensionInfo.getServices().clear();
+
+    LOG.info("***Merging extension services with parent: " + parentExtension.getId());
+
+    Collection<ServiceModule> mergedModules = mergeChildModules(
+        allStacks, commonServices, extensions, serviceModules, parentExtension.serviceModules);
+    for (ServiceModule module : mergedModules) {
+      serviceModules.put(module.getId(), module);
+      extensionInfo.getServices().add(module.getModuleInfo());
+    }
+  }
+
+  /**
+   * Merge services with their explicitly specified parent if one has been specified.
+   * @param allStacks      all stacks in stack definition
+   * @param commonServices all common services specified in the stack definition
+   *
+   * @throws AmbariException if an exception occurs while merging child services with their explicit parents
+   */
+  private void mergeServicesWithExplicitParent(
+        Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
+    for (ServiceModule service : serviceModules.values()) {
+      ServiceInfo serviceInfo = service.getModuleInfo();
+      String parent = serviceInfo.getParent();
+      if (parent != null) {
+        mergeServiceWithExplicitParent(service, parent, allStacks, commonServices, extensions);
+      }
+    }
+  }
+
+  /**
+   * Merge a service with its explicitly specified parent.
+   * @param service          the service to merge
+   * @param parent           the explicitly specified parent service
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services specified in the stack definition
+   *
+   * @throws AmbariException if an exception occurs merging a service with its explicit parent
+   */
+  private void mergeServiceWithExplicitParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+    if(isCommonServiceParent(parent)) {
+      LOG.info("merging with common service: " + service.getModuleInfo().getName());
+      mergeServiceWithCommonServiceParent(service, parent, allStacks, commonServices, extensions);
+      LOG.info("display name: " + service.getModuleInfo().getDisplayName());
+    } else {
+      throw new AmbariException("The service '" + service.getModuleInfo().getName() + "' in extension '" + extensionInfo.getName() + ":"
+          + extensionInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
+    }
+  }
+
+  /**
+   * @param allStacks        all stacks specified in the stack definition
+   *
+   * @throws AmbariException if the extension name is the same as any of the stacks
+   */
+  private void checkExtensionName(Map<String, StackModule> allStacks)
+      throws AmbariException {
+
+    String name = extensionInfo.getName();
+    for (StackModule stack : allStacks.values()) {
+      String stackName = stack.getModuleInfo().getName();
+      if (name.equals(stackName)) {
+        throw new AmbariException("The extension '" + name + "' has a name which matches a stack name");
+      }
+    }
+  }
+
+  /**
+   * Check if parent is common service
+   * @param parent  Parent string
+   * @return true: if parent is common service, false otherwise
+   */
+  private boolean isCommonServiceParent(String parent) {
+    return parent != null
+        && !parent.isEmpty()
+        && parent.split(StackManager.PATH_DELIMITER)[0].equalsIgnoreCase(StackManager.COMMON_SERVICES);
+  }
+
+  /**
+   * Merge a service with its explicitly specified common service as parent.
+   * Parent: common-services/<serviceName>/<serviceVersion>
+   * Common Services Lookup Key: <serviceName>/<serviceVersion>
+   * Example:
+   *  Parent: common-services/HDFS/2.1.0.2.0
+   *  Key: HDFS/2.1.0.2.0
+   *
+   * @param service          the service to merge
+   * @param parent           the explicitly specified common service as parent
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services specified in the stack definition
+   * @throws AmbariException
+   */
+  private void mergeServiceWithCommonServiceParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+    ServiceInfo serviceInfo = service.getModuleInfo();
+    String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
+    if(parentToks.length != 3 || !parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
+      throw new AmbariException("The service '" + serviceInfo.getName() + "' in extension '" + extensionInfo.getName() + ":"
+          + extensionInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
+    }
+
+    String baseServiceKey = parentToks[1] + StackManager.PATH_DELIMITER + parentToks[2];
+    ServiceModule baseService = commonServices.get(baseServiceKey);
+    if (baseService == null) {
+      setValid(false);
+      extensionInfo.setValid(false);
+      String error = "The service '" + serviceInfo.getName() + "' in extension '" + extensionInfo.getName() + ":"
+          + extensionInfo.getVersion() + "' extends a non-existent service: '" + parent + "'";
+      addError(error);
+      extensionInfo.addError(error);
+    } else {
+      if (baseService.isValid()) {
+        service.resolveExplicit(baseService, allStacks, commonServices, extensions);
+      } else {
+        setValid(false);
+        extensionInfo.setValid(false);
+        addErrors(baseService.getErrors());
+        extensionInfo.addErrors(baseService.getErrors());
+      }
+    }
+  }
+
+  /**
+   * Populate the extension module and info from the extension definition.
+   */
+  private void populateExtensionInfo() {
+    extensionInfo.setName(extensionDirectory.getExtensionDirName());
+    extensionInfo.setVersion(extensionDirectory.getName());
+
+    id = String.format("%s:%s", extensionInfo.getName(), extensionInfo.getVersion());
+
+    LOG.debug("Adding new extension to known extensions"
+        + ", extensionName = " + extensionInfo.getName()
+        + ", extensionVersion = " + extensionInfo.getVersion());
+
+
+    //todo: give additional thought on handling missing metainfo.xml
+    ExtensionMetainfoXml emx = extensionDirectory.getMetaInfoFile();
+    if (emx != null) {
+      if (!emx.isValid()) {
+        extensionInfo.setValid(false);
+        extensionInfo.addErrors(emx.getErrors());
+      }
+      extensionInfo.setParentExtensionVersion(emx.getExtends());
+      extensionInfo.setStacks(emx.getStacks());
+      extensionInfo.setExtensions(emx.getExtensions());
+    }
+
+    try {
+      // Read the service for this extension
+      populateServices();
+      if (!extensionInfo.isValid()) {
+        setValid(false);
+        addErrors(extensionInfo.getErrors());
+      }
+
+      //todo: shouldn't blindly catch Exception, re-evaluate this.
+    } catch (Exception e) {
+      String error = "Exception caught while populating services for extension: " +
+          extensionInfo.getName() + "-" + extensionInfo.getVersion();
+      setValid(false);
+      extensionInfo.setValid(false);
+      addError(error);
+      extensionInfo.addError(error);
+      LOG.error(error);
+    }
+  }
+
+  /**
+   * Populate the child services.
+   */
+  private void populateServices()throws AmbariException {
+    for (ServiceDirectory serviceDir : extensionDirectory.getServiceDirectories()) {
+      populateService(serviceDir);
+    }
+  }
+
+  /**
+   * Populate a child service.
+   *
+   * @param serviceDirectory the child service directory
+   */
+  private void populateService(ServiceDirectory serviceDirectory)  {
+    Collection<ServiceModule> serviceModules = new ArrayList<ServiceModule>();
+    // unfortunately, we allow multiple services to be specified in the same metainfo.xml,
+    // so we can't move the unmarshal logic into ServiceModule
+    ServiceMetainfoXml metaInfoXml = serviceDirectory.getMetaInfoFile();
+    if (!metaInfoXml.isValid()){
+      extensionInfo.setValid(metaInfoXml.isValid());
+      setValid(metaInfoXml.isValid());
+      extensionInfo.addErrors(metaInfoXml.getErrors());
+      addErrors(metaInfoXml.getErrors());
+      return;
+    }
+    List<ServiceInfo> serviceInfos = metaInfoXml.getServices();
+
+    for (ServiceInfo serviceInfo : serviceInfos) {
+      ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory);
+      serviceModules.add(serviceModule);
+      if (!serviceModule.isValid()){
+        extensionInfo.setValid(false);
+        setValid(false);
+        extensionInfo.addErrors(serviceModule.getErrors());
+        addErrors(serviceModule.getErrors());
+      }
+    }
+    addServices(serviceModules);
+  }
+
+  /**
+   * Resolve another extension module.
+   *
+   * @param parentExtension    extension module to be resolved
+   * @param allStacks          all stack modules in stack definition
+   * @param commonServices     all common services specified in the stack definition
+   * @param extensions         all extensions
+   * @throws AmbariException if unable to resolve the extension
+   */
+  private void resolveExtension(
+          ExtensionModule parentExtension, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+          throws AmbariException {
+    if (parentExtension.getModuleState() == ModuleState.INIT) {
+	  parentExtension.resolve(null, allStacks, commonServices, extensions);
+    } else if (parentExtension.getModuleState() == ModuleState.VISITED) {
+      //todo: provide more information to user about cycle
+      throw new AmbariException("Cycle detected while parsing extension definition");
+    }
+    if (!parentExtension.isValid() || (parentExtension.getModuleInfo() != null && !parentExtension.getModuleInfo().isValid())) {
+      setValid(parentExtension.isValid());
+      extensionInfo.setValid(parentExtension.extensionInfo.isValid());
+      addErrors(parentExtension.getErrors());
+      extensionInfo.addErrors(parentExtension.getErrors());
+    }
+  }
+
+  /**
+   * Add a child service module to the extension.
+   *
+   * @param service  service module to add
+   */
+  private void addService(ServiceModule service) {
+    ServiceInfo serviceInfo = service.getModuleInfo();
+    Object previousValue = serviceModules.put(service.getId(), service);
+    if (previousValue == null) {
+      extensionInfo.getServices().add(serviceInfo);
+    }
+  }
+
+  /**
+   * Add child service modules to the extension.
+   *
+   * @param services  collection of service modules to add
+   */
+  private void addServices(Collection<ServiceModule> services) {
+    for (ServiceModule service : services) {
+      addService(service);
+    }
+  }
+
+  @Override
+  public boolean isValid() {
+    return valid;
+  }
+
+  @Override
+  public void setValid(boolean valid) {
+    this.valid = valid;
+  }
+
+  private Set<String> errorSet = new HashSet<String>();
+
+  @Override
+  public Collection getErrors() {
+    return errorSet;
+  }
+
+  @Override
+  public void addError(String error) {
+    errorSet.add(error);
+  }
+
+  @Override
+  public void addErrors(Collection<String> errors) {
+    this.errorSet.addAll(errors);
+  }
+
+}

+ 4 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/ModuleFileUnmarshaller.java

@@ -20,6 +20,7 @@ package org.apache.ambari.server.stack;
 
 import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.ConfigurationXml;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
 import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.apache.ambari.server.state.stack.StackMetainfoXml;
@@ -27,7 +28,9 @@ import org.apache.ambari.server.state.stack.UpgradePack;
 
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
+
 import java.io.File;
 import java.util.HashMap;
 import java.util.Map;
@@ -72,6 +75,7 @@ class ModuleFileUnmarshaller {
       jaxbContexts.put(UpgradePack.class, ctx);
       jaxbContexts.put(ConfigUpgradePack.class, ctx);
       jaxbContexts.put(ServiceMetainfoXml.class, JAXBContext.newInstance(ServiceMetainfoXml.class));
+      jaxbContexts.put(ExtensionMetainfoXml.class, JAXBContext.newInstance(ExtensionMetainfoXml.class));
     } catch (JAXBException e) {
       throw new RuntimeException (e);
     }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java

@@ -83,7 +83,7 @@ public class QuickLinksConfigurationModule extends BaseModule<QuickLinksConfigur
   }
 
   @Override
-  public void resolve(QuickLinksConfigurationModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
+  public void resolve(QuickLinksConfigurationModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> allExtensions) throws AmbariException {
     QuickLinksConfigurationInfo parentModuleInfo = parent.getModuleInfo();
 
     if (parent.getModuleInfo() != null && !moduleInfo.isDeleted()) {

+ 86 - 28
ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java

@@ -22,11 +22,17 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
+import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.xml.bind.JAXBException;
+
 import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -59,6 +65,16 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
    */
   private File kerberosDescriptorFile;
 
+  /**
+   * RCO file
+   */
+  private File rcoFile;
+
+  /**
+   * role command order
+   */
+  private StackRoleCommandOrder roleCommandOrder;
+
   /**
    * widgets descriptor file
    */
@@ -119,33 +135,6 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
   public ServiceDirectory(String servicePath) throws AmbariException {
     super(servicePath);
     parsePath();
-
-    File af = new File(directory.getAbsolutePath()
-        + File.separator + AmbariMetaInfo.SERVICE_ALERT_FILE_NAME);
-    alertsFile = af.exists() ? af : null;
-
-    File kdf = new File(directory.getAbsolutePath()
-        + File.separator + AmbariMetaInfo.KERBEROS_DESCRIPTOR_FILE_NAME);
-    kerberosDescriptorFile = kdf.exists() ? kdf : null;
-
-    if (metaInfoXml.getServices() != null) {
-      for (ServiceInfo serviceInfo : metaInfoXml.getServices()) {
-        File mf = new File(directory.getAbsolutePath()
-                + File.separator + serviceInfo.getMetricsFileName());
-        metricsFileMap.put(serviceInfo.getName(), mf.exists() ? mf : null);
-
-        File wdf = new File(directory.getAbsolutePath()
-                + File.separator + serviceInfo.getWidgetsFileName());
-        widgetsDescriptorFileMap.put(serviceInfo.getName(), wdf.exists() ? wdf : null);
-      }
-    }
-
-    File advFile = new File(directory.getAbsolutePath()
-        + File.separator + AmbariMetaInfo.SERVICE_ADVISOR_FILE_NAME);
-    advisorFile = advFile.exists() ? advFile : null;
-
-    File themeFile = new File(directory.getAbsolutePath() + File.separator + AmbariMetaInfo.SERVICE_THEME_FILE_NAME);
-    this.themeFile = themeFile.exists() ? themeFile : null;
   }
 
   /**
@@ -236,10 +225,55 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
     return metaInfoXml;
   }
 
+  /**
+   * Obtain the object representation of the service role_command_order.json file
+   *
+   * @return object representation of the service role_command_order.json file;
+   *         {@code null} if the service directory contains no
+   *         role_command_order.json (it is only parsed when the file exists)
+   */
+  public StackRoleCommandOrder getRoleCommandOrder() {
+    return roleCommandOrder;
+  }
+
   /**
    * Parse the service directory.
    */
-  protected abstract void parsePath() throws AmbariException;
+  protected void parsePath() throws AmbariException {
+    calculateDirectories();
+    parseMetaInfoFile();
+
+    // Optional definition files: record the File when present, null otherwise.
+    File af = new File(directory, AmbariMetaInfo.SERVICE_ALERT_FILE_NAME);
+    alertsFile = af.exists() ? af : null;
+
+    File kdf = new File(directory, AmbariMetaInfo.KERBEROS_DESCRIPTOR_FILE_NAME);
+    kerberosDescriptorFile = kdf.exists() ? kdf : null;
+
+    File rco = new File(directory, AmbariMetaInfo.RCO_FILE_NAME);
+    if (rco.exists()) {
+      rcoFile = rco;
+      parseRoleCommandOrder();
+    }
+
+    if (metaInfoXml.getServices() != null) {
+      // Metrics and widgets descriptor files are tracked per service name.
+      for (ServiceInfo serviceInfo : metaInfoXml.getServices()) {
+        File mf = new File(directory, serviceInfo.getMetricsFileName());
+        metricsFileMap.put(serviceInfo.getName(), mf.exists() ? mf : null);
+
+        File wdf = new File(directory, serviceInfo.getWidgetsFileName());
+        widgetsDescriptorFileMap.put(serviceInfo.getName(), wdf.exists() ? wdf : null);
+      }
+    }
+
+    File advFile = new File(directory, AmbariMetaInfo.SERVICE_ADVISOR_FILE_NAME);
+    advisorFile = advFile.exists() ? advFile : null;
+
+    // Use a distinct local name so the themeFile field is not shadowed.
+    File tf = new File(directory, AmbariMetaInfo.SERVICE_THEME_FILE_NAME);
+    themeFile = tf.exists() ? tf : null;
+  }
+
+  /**
+   * Calculate the service specific directories.
+   *
+   * Implemented by concrete service directory types; invoked from
+   * parsePath() before the metainfo file is unmarshalled.
+   */
+  protected abstract void calculateDirectories();
 
   /**
    * Unmarshal the metainfo file into its object representation.
@@ -266,4 +300,28 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
     }
   }
 
+  /**
+   * Parse the service role_command_order.json file into a
+   * StackRoleCommandOrder.
+   *
+   * No-op when the service directory has no RCO file; on a read/parse
+   * failure the error is logged and roleCommandOrder is left unset.
+   */
+  private void parseRoleCommandOrder() {
+    if (rcoFile == null) {
+      return;
+    }
+
+    try {
+      ObjectMapper mapper = new ObjectMapper();
+      TypeReference<Map<String, Object>> rcoElementTypeReference = new TypeReference<Map<String, Object>>() {};
+      HashMap<String, Object> result = mapper.readValue(rcoFile, rcoElementTypeReference);
+      LOG.info("Role command order info was loaded from file: {}", rcoFile.getAbsolutePath());
+
+      roleCommandOrder = new StackRoleCommandOrder(result);
+
+      // rcoFile was null-checked above, so only the log level needs guarding.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Role Command Order for {}", rcoFile.getAbsolutePath());
+        roleCommandOrder.printRoleCommandOrder(LOG);
+      }
+    } catch (IOException e) {
+      // Parameterized SLF4J call; the throwable as last argument keeps the
+      // stack trace, same message text as before.
+      LOG.error("Can not read role command order info {}", rcoFile.getAbsolutePath(), e);
+    }
+  }
+
 }

+ 35 - 22
ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceModule.java

@@ -36,6 +36,8 @@ import org.apache.ambari.server.state.ThemeInfo;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.commons.lang.builder.ToStringStyle;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 
@@ -101,6 +103,11 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    */
   protected boolean valid = true;
 
+  /**
+   * Logger
+   */
+  private final static Logger LOG = LoggerFactory.getLogger(ServiceModule.class);
+
   /**
    * Constructor.
    *
@@ -131,6 +138,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
     serviceInfo.setAlertsFile(serviceDirectory.getAlertsFile());
     serviceInfo.setKerberosDescriptorFile(serviceDirectory.getKerberosDescriptorFile());
     serviceInfo.setWidgetsDescriptorFile(serviceDirectory.getWidgetsDescriptorFile(serviceInfo.getName()));
+    serviceInfo.setRoleCommandOrder(serviceDirectory.getRoleCommandOrder());
     serviceInfo.setSchemaVersion(AmbariMetaInfo.SCHEMA_VERSION_2);
     serviceInfo.setServicePackageFolder(serviceDirectory.getPackageDir());
     serviceInfo.setServiceUpgradesFolder(serviceDirectory.getUpgradesDir());
@@ -152,25 +160,27 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
 
   @Override
   public void resolve(
-      ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
-    resolveInternal(parentModule, allStacks, commonServices, false);
+    resolveInternal(parentModule, allStacks, commonServices, extensions, false);
   }
 
   public void resolveExplicit(
-      ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
-    resolveInternal(parentModule, allStacks, commonServices, true);
+    resolveInternal(parentModule, allStacks, commonServices, extensions, true);
   }
 
   public void resolveInternal(
       ServiceModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices,
-      boolean resolveExplicit)
+      Map<String, ExtensionModule> extensions, boolean resolveExplicit)
       throws AmbariException {
     if (!serviceInfo.isValid() || !parentModule.isValid()) {
       return;
     }
 
+    LOG.info("Resolve service");
+
     // If resolving against parent stack service module (stack inheritance), do not merge if an
     // explicit parent is specified
     if(!StringUtils.isBlank(serviceInfo.getParent()) && !resolveExplicit) {
@@ -182,6 +192,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
     if (serviceInfo.getComment() == null) {
       serviceInfo.setComment(parent.getComment());
     }
+    LOG.info("Display name service/parent: " + serviceInfo.getDisplayName() + "/" + parent.getDisplayName());
     if (serviceInfo.getDisplayName() == null) {
       serviceInfo.setDisplayName(parent.getDisplayName());
     }
@@ -239,17 +250,19 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
       serviceInfo.setAdvisorName(parent.getAdvisorName());
     }
 
+    if (serviceInfo.getRoleCommandOrder() == null) {
+      serviceInfo.setRoleCommandOrder(parent.getRoleCommandOrder());
+    }
+
     mergeCustomCommands(parent.getCustomCommands(), serviceInfo.getCustomCommands());
     mergeConfigDependencies(parent);
-    mergeComponents(parentModule, allStacks, commonServices);
-    mergeConfigurations(parentModule, allStacks, commonServices);
-    mergeThemes(parentModule, allStacks, commonServices);
-    mergeQuickLinksConfigurations(parentModule, allStacks, commonServices);
+    mergeComponents(parentModule, allStacks, commonServices, extensions);
+    mergeConfigurations(parentModule, allStacks, commonServices, extensions);
+    mergeThemes(parentModule, allStacks, commonServices, extensions);
+    mergeQuickLinksConfigurations(parentModule, allStacks, commonServices, extensions);
     mergeExcludedConfigTypes(parent);
 
-
     mergeServiceProperties(parent.getServicePropertyList());
-
   }
 
   /**
@@ -296,7 +309,7 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    *
    * @throws AmbariException
    */
-  public void resolveCommonService(Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+  public void resolveCommonService(Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     if(!isCommonService) {
       throw new AmbariException("Not a common service");
@@ -314,12 +327,12 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
         ServiceModule baseService = commonServices.get(baseServiceKey);
         ModuleState baseModuleState = baseService.getModuleState();
         if (baseModuleState == ModuleState.INIT) {
-          baseService.resolveCommonService(allStacks, commonServices);
+          baseService.resolveCommonService(allStacks, commonServices, extensions);
         } else if (baseModuleState == ModuleState.VISITED) {
           //todo: provide more information to user about cycle
           throw new AmbariException("Cycle detected while parsing common service");
         }
-        resolveExplicit(baseService, allStacks, commonServices);
+        resolveExplicit(baseService, allStacks, commonServices, extensions);
       } else {
         throw new AmbariException("Common service cannot inherit from a non common service");
       }
@@ -414,8 +427,8 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    * Merge theme modules.
    */
   private void mergeThemes(ServiceModule parent, Map<String, StackModule> allStacks,
-                           Map<String, ServiceModule> commonServices) throws AmbariException {
-    Collection<ThemeModule> mergedModules = mergeChildModules(allStacks, commonServices, themeModules, parent.themeModules);
+                           Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
+    Collection<ThemeModule> mergedModules = mergeChildModules(allStacks, commonServices, extensions, themeModules, parent.themeModules);
 
     for (ThemeModule mergedModule : mergedModules) {
       themeModules.put(mergedModule.getId(), mergedModule);
@@ -448,8 +461,8 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    * Merge theme modules.
    */
   private void mergeQuickLinksConfigurations(ServiceModule parent, Map<String, StackModule> allStacks,
-                           Map<String, ServiceModule> commonServices) throws AmbariException {
-    Collection<QuickLinksConfigurationModule> mergedModules = mergeChildModules(allStacks, commonServices, quickLinksConfigurationModules, parent.quickLinksConfigurationModules);
+                           Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
+    Collection<QuickLinksConfigurationModule> mergedModules = mergeChildModules(allStacks, commonServices, extensions, quickLinksConfigurationModules, parent.quickLinksConfigurationModules);
 
     for (QuickLinksConfigurationModule mergedModule : mergedModules) {
       quickLinksConfigurationModules.put(mergedModule.getId(), mergedModule);
@@ -514,13 +527,13 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    * @param commonServices  common service modules
    */
   private void mergeConfigurations(
-      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     serviceInfo.getProperties().clear();
     serviceInfo.setAllConfigAttributes(new HashMap<String, Map<String, Map<String, String>>>());
 
     Collection<ConfigurationModule> mergedModules = mergeChildModules(
-        allStacks, commonServices, configurationModules, parent.configurationModules);
+        allStacks, commonServices, extensions, configurationModules, parent.configurationModules);
 
     for (ConfigurationModule module : mergedModules) {
       configurationModules.put(module.getId(), module);
@@ -540,11 +553,11 @@ public class ServiceModule extends BaseModule<ServiceModule, ServiceInfo> implem
    * @param commonServices  common service modules
    */
   private void mergeComponents(
-      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      ServiceModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     serviceInfo.getComponents().clear();
     Collection<ComponentModule> mergedModules = mergeChildModules(
-        allStacks, commonServices, componentModules, parent.componentModules);
+        allStacks, commonServices, extensions, componentModules, parent.componentModules);
     componentModules.clear();
     for (ComponentModule module : mergedModules) {
       componentModules.put(module.getId(), module);

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java

@@ -37,10 +37,11 @@ public interface StackDefinitionModule <T, I> {
    * @param parent          the parent that this module will be merged with
    * @param allStacks       collection of all stack modules in the tree
    * @param commonServices  collection of all common service modules in the tree
+   * @param extensions  collection of all extension modules in the tree
    *
    * @throws AmbariException if resolution fails
    */
-  public void resolve(T parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException;
+  public void resolve(T parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException;
 
   /**
    * Obtain the associated module information.

+ 0 - 32
ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java

@@ -272,7 +272,6 @@ public class StackDirectory extends StackDefinitionDirectory {
    *
    * @return object representation of the stack role_command_order.json file
    */
-
   public StackRoleCommandOrder getRoleCommandOrder() {
     return roleCommandOrder;
   }
@@ -509,7 +508,6 @@ public class StackDirectory extends StackDefinitionDirectory {
         result = new HashMap<String, Object>();
       }
       roleCommandOrder = new StackRoleCommandOrder(result);
-      parseRoleCommandOrdersForServices();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Role Command Order for " + rcoFilePath);
         roleCommandOrder.printRoleCommandOrder(LOG);
@@ -518,34 +516,4 @@ public class StackDirectory extends StackDefinitionDirectory {
       LOG.error(String.format("Can not read role command order info %s", rcoFilePath), e);
     }
   }
-
-  private void parseRoleCommandOrdersForServices() {
-    if (rcoFilePath != null) {
-      File stack = new File(rcoFilePath).getParentFile();
-      File servicesDir = new File(stack, "services");
-      File[] services = servicesDir.listFiles();
-      for (File service : services) {
-        if (service.isDirectory()) {
-          File rcoFile = new File(service, ROLE_COMMAND_ORDER_FILE);
-          if (rcoFile.exists())
-            parseRoleCommandOrdersForService(rcoFile);
-        }
-      }
-    }
-  }
-
-  private void parseRoleCommandOrdersForService(File rcoFile) {
-    HashMap<String, Object> result = null;
-    ObjectMapper mapper = new ObjectMapper();
-    TypeReference<Map<String, Object>> rcoElementTypeReference = new TypeReference<Map<String, Object>>() {};
-    try {
-      result = mapper.readValue(rcoFile, rcoElementTypeReference);
-      LOG.info("Role command order info was loaded from file: {}", rcoFile.getAbsolutePath());
-      StackRoleCommandOrder serviceRoleCommandOrder = new StackRoleCommandOrder(result);
-      roleCommandOrder.merge(serviceRoleCommandOrder, true);
-    } catch (IOException e) {
-      LOG.error(String.format("Can not read role command order info %s", rcoFile), e);
-    }
-  }
-
 }

+ 214 - 9
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java

@@ -22,6 +22,7 @@ import java.io.File;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 
 import javax.annotation.Nullable;
@@ -35,9 +36,14 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.OsFamily;
@@ -71,11 +77,20 @@ public class StackManager {
    */
   public static final String COMMON_SERVICES = "common-services";
 
+  /**
+   * Prefix used for extension services parent path string
+   */
+  public static final String EXTENSIONS = "extensions";
+
+  public static final String METAINFO_FILE_NAME = "metainfo.xml";
+
   /**
    * Provides access to non-stack server functionality
    */
   private StackContext stackContext;
 
+  private File stackRoot;
+
   /**
    * Logger
    */
@@ -86,6 +101,11 @@ public class StackManager {
    */
   private Map<String, StackInfo> stackMap = new HashMap<String, StackInfo>();
 
+  /**
+   * Map of extension id to extension info
+   */
+  private Map<String, ExtensionInfo> extensionMap = new HashMap<String, ExtensionInfo>();
+
   /**
    * Constructor. Initialize stack manager.
    *
@@ -93,6 +113,8 @@ public class StackManager {
    *          stack root directory
    * @param commonServicesRoot
    *          common services root directory
+   * @param extensionRoot
+   *          extensions root directory
    * @param osFamily
    *          the OS family read from resources
    * @param metaInfoDAO
@@ -101,6 +123,10 @@ public class StackManager {
    *          action meta data automatically injected
    * @param stackDao
    *          stack DAO automatically injected
+   * @param extensionDao
+   *          extension DAO automatically injected
+   * @param linkDao
+   *          extension link DAO automatically injected
    *
    * @throws AmbariException
    *           if an exception occurs while processing the stacks
@@ -108,9 +134,10 @@ public class StackManager {
   @Inject
   public StackManager(@Assisted("stackRoot") File stackRoot,
       @Assisted("commonServicesRoot") @Nullable File commonServicesRoot,
+      @Assisted("extensionRoot") @Nullable File extensionRoot,
       @Assisted OsFamily osFamily, @Assisted boolean validate,
-                      MetainfoDAO metaInfoDAO,
-      ActionMetadata actionMetadata, StackDAO stackDao)
+      MetainfoDAO metaInfoDAO, ActionMetadata actionMetadata, StackDAO stackDao,
+      ExtensionDAO extensionDao, ExtensionLinkDAO linkDao)
       throws AmbariException {
 
     LOG.info("Initializing the stack manager...");
@@ -118,22 +145,50 @@ public class StackManager {
     if (validate) {
       validateStackDirectory(stackRoot);
       validateCommonServicesDirectory(commonServicesRoot);
+      validateExtensionDirectory(extensionRoot);
     }
 
     stackMap = new HashMap<String, StackInfo>();
     stackContext = new StackContext(metaInfoDAO, actionMetadata, osFamily);
+    extensionMap = new HashMap<String, ExtensionInfo>();
 
     Map<String, ServiceModule> commonServiceModules = parseCommonServicesDirectory(commonServicesRoot);
     Map<String, StackModule> stackModules = parseStackDirectory(stackRoot);
+    LOG.info("About to parse extension directories");
+    Map<String, ExtensionModule> extensionModules = null;
+    extensionModules = parseExtensionDirectory(extensionRoot);
+
+    //Read the extension links from the DB
+    for (StackModule module : stackModules.values()) {
+      StackInfo stack = module.getModuleInfo();
+      List<ExtensionLinkEntity> entities = linkDao.findByStack(stack.getName(), stack.getVersion());
+      for (ExtensionLinkEntity entity : entities) {
+        String name = entity.getExtension().getExtensionName();
+        String version = entity.getExtension().getExtensionVersion();
+        String key = name + StackManager.PATH_DELIMITER + version;
+        ExtensionModule extensionModule = extensionModules.get(key);
+        if (extensionModule != null) {
+          LOG.info("Adding extension to stack/version: " + stack.getName() + "/" + stack.getVersion() +
+                   " extension/version: " + name + "/" + version);
+          //Add the extension to the stack
+          module.getExtensionModules().put(key, extensionModule);
+        }
+      }
+    }
 
-    fullyResolveCommonServices(stackModules, commonServiceModules);
-    fullyResolveStacks(stackModules, commonServiceModules);
+    fullyResolveCommonServices(stackModules, commonServiceModules, extensionModules);
+    fullyResolveExtensions(stackModules, commonServiceModules, extensionModules);
+    fullyResolveStacks(stackModules, commonServiceModules, extensionModules);
 
+    populateDB(stackDao, extensionDao);
+  }
+
+  private void populateDB(StackDAO stackDao, ExtensionDAO extensionDao) throws AmbariException {
     // for every stack read in, ensure that we have a database entry for it;
     // don't put try/catch logic around this since a failure here will
     // cause other things to break down the road
     Collection<StackInfo> stacks = getStacks();
-    for( StackInfo stack : stacks ){
+    for(StackInfo stack : stacks){
       String stackName = stack.getName();
       String stackVersion = stack.getVersion();
 
@@ -147,6 +202,25 @@ public class StackManager {
         stackDao.create(stackEntity);
       }
     }
+
+    // for every extension read in, ensure that we have a database entry for it;
+    // don't put try/catch logic around this since a failure here will
+    // cause other things to break down the road
+    Collection<ExtensionInfo> extensions = getExtensions();
+    for(ExtensionInfo extension : extensions){
+      String extensionName = extension.getName();
+      String extensionVersion = extension.getVersion();
+
+      if (extensionDao.find(extensionName, extensionVersion) == null) {
+        LOG.info("Adding extension {}-{} to the database", extensionName, extensionVersion);
+
+        ExtensionEntity extensionEntity = new ExtensionEntity();
+        extensionEntity.setExtensionName(extensionName);
+        extensionEntity.setExtensionVersion(extensionVersion);
+
+        extensionDao.create(extensionEntity);
+      }
+    }
   }
 
   /**
@@ -187,6 +261,44 @@ public class StackManager {
     return stackMap.values();
   }
 
+  /**
+   * Obtain the extension info specified by name and version.
+   *
+   * @param name     name of the extension
+   * @param version  version of the extension
+   * @return The extension corresponding to the specified name and version.
+   *         If no matching extension exists, null is returned.
+   */
+  public ExtensionInfo getExtension(String name, String version) {
+    return extensionMap.get(name + StackManager.PATH_DELIMITER + version);
+  }
+
+  /**
+   * Obtain all extensions for the given name.
+   *
+   * @param name  extension name
+   * @return A collection of all extensions with the given name.
+   *         If no extensions match the specified name, an empty collection is returned.
+   */
+  public Collection<ExtensionInfo> getExtensions(String name) {
+    Collection<ExtensionInfo> extensions = new HashSet<ExtensionInfo>();
+    // Linear scan over all registered extensions; the map is keyed by
+    // name/version so a direct lookup by name alone is not possible.
+    for (ExtensionInfo extension: extensionMap.values()) {
+      if (extension.getName().equals(name)) {
+        extensions.add(extension);
+      }
+    }
+    return extensions;
+  }
+
+  /**
+   * Obtain all extensions.
+   *
+   * @return collection of all extensions; this is a live view backed by the
+   *         internal extension map, not a defensive copy
+   */
+  public Collection<ExtensionInfo> getExtensions() {
+    return extensionMap.values();
+  }
+
   /**
    * Determine if all tasks which update stack repo urls have completed.
    *
@@ -204,12 +316,12 @@ public class StackManager {
    * @throws AmbariException if unable to resolve all stacks
    */
   private void fullyResolveStacks(
-      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules)
+      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     // Resolve all stacks without finalizing the stacks.
     for (StackModule stack : stackModules.values()) {
       if (stack.getModuleState() == ModuleState.INIT) {
-        stack.resolve(null, stackModules, commonServiceModules);
+        stack.resolve(null, stackModules, commonServiceModules, extensions);
       }
     }
     // Finalize the common services and stacks to remove sub-modules marked for deletion.
@@ -219,6 +331,9 @@ public class StackManager {
     for(ServiceModule commonService : commonServiceModules.values()) {
       commonService.finalizeModule();
     }
+    for (ExtensionModule extension : extensions.values()) {
+      extension.finalizeModule();
+    }
     for (StackModule stack : stackModules.values()) {
       stack.finalizeModule();
     }
@@ -234,11 +349,29 @@ public class StackManager {
    * @throws AmbariException if unable to resolve all common services
    */
   private void fullyResolveCommonServices(
-      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules)
+      Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules, Map<String, ExtensionModule> extensions)
       throws AmbariException {
+    // Resolve only modules still in INIT; resolveCommonService recurses into
+    // base services itself and raises on VISITED (cycle detection).
     for(ServiceModule commonService : commonServiceModules.values()) {
       if (commonService.getModuleState() == ModuleState.INIT) {
-        commonService.resolveCommonService(stackModules, commonServiceModules);
+        commonService.resolveCommonService(stackModules, commonServiceModules, extensions);
+      }
+    }
+  }
+
+  /**
+   * Fully resolve extensions.
+   *
+   * @param extensionModules      map of extension id which contains name and version to extension module.
+   * @param stackModules          map of stack id which contains name and version to stack module.
+   * @param commonServiceModules  map of common service id which contains name and version to common service module.
+   * @throws AmbariException if unable to resolve all extensions
+   */
+  private void fullyResolveExtensions(Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules,
+      Map<String, ExtensionModule> extensionModules)
+      throws AmbariException {
+    // Only modules still in INIT need an explicit resolve; modules reached
+    // through another module's resolve chain change state themselves.
+    for(ExtensionModule extensionModule : extensionModules.values()) {
+      if (extensionModule.getModuleState() == ModuleState.INIT) {
+        extensionModule.resolve(null, stackModules, commonServiceModules, extensionModules);
       }
     }
   }
@@ -321,6 +454,34 @@ public class StackManager {
     }
   }
 
+
+
+  /**
+   * Validate that the specified extension root is a valid directory.
+   *
+   * @param extensionRoot  the extension root directory to validate
+   * @throws AmbariException if the specified extension root directory is invalid
+   */
+  private void validateExtensionDirectory(File extensionRoot) throws AmbariException {
+    LOG.info("Validating extension directory {} ...", extensionRoot);
+
+    if (extensionRoot == null)
      return;
+
+    String extensionRootAbsPath = extensionRoot.getAbsolutePath();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading extension information"
+          + ", extensionRoot = " + extensionRootAbsPath);
+    }
+
+    //For backwards compatibility extension directory may not exist
+    if (extensionRoot.exists() && !extensionRoot.isDirectory()) {
+      throw new AmbariException("" + Configuration.METADATA_DIR_PATH // NOTE(review): looks like this should reference an extensions-root property rather than METADATA_DIR_PATH — confirm
+          + " should be a directory"
+          + ", extensionRoot = " + extensionRootAbsPath);
+    }
+  }
+
   /**
    * Parse the specified common services root directory
    *
@@ -399,4 +560,48 @@ public class StackManager {
     }
     return stackModules;
   }
+
+  public void linkStackToExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  }
+
+  public void unlinkStackAndExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  }
+
+  /**
+   * Parse the specified extension root directory
+   *
+   * @param extensionRoot  the extension root directory to parse
+   * @return map of extension id which contains name and version to extension module.
+   * @throws AmbariException if unable to parse all extensions
+   */
+  private Map<String, ExtensionModule> parseExtensionDirectory(File extensionRoot) throws AmbariException {
+    Map<String, ExtensionModule> extensionModules = new HashMap<String, ExtensionModule>();
+    if (extensionRoot == null || !extensionRoot.exists())
+      return extensionModules;
+
+    File[] extensionFiles = extensionRoot.listFiles(AmbariMetaInfo.FILENAME_FILTER);
+    for (File extensionNameFolder : extensionFiles) {
+      if (extensionNameFolder.isFile()) {
+        continue;
+      }
+      for (File extensionVersionFolder : extensionNameFolder.listFiles(AmbariMetaInfo.FILENAME_FILTER)) {
+        if (extensionVersionFolder.isFile()) {
+          continue;
+        }
+        String extensionName = extensionNameFolder.getName();
+        String extensionVersion = extensionVersionFolder.getName();
+
+        ExtensionModule extensionModule = new ExtensionModule(new ExtensionDirectory(extensionVersionFolder.getPath()), stackContext);
+        String extensionKey = extensionName + StackManager.PATH_DELIMITER + extensionVersion;
+        extensionModules.put(extensionKey, extensionModule);
+        extensionMap.put(extensionKey, extensionModule.getModuleInfo());
+      }
+    }
+
+    if (extensionModules.isEmpty()) {
+      throw new AmbariException("Unable to find extension definitions under " +
+          "extensionRoot = " + extensionRoot.getAbsolutePath());
+    }
+    return extensionModules;
+  }
 }

+ 3 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManagerFactory.java

@@ -38,11 +38,14 @@ public interface StackManagerFactory {
    * @param commonServicesRoot
    *          the root of the common services from which other stack services
    *          are extended (not {@code null}).
+   * @param extensionRoot
+   *          the root of the extensions (not {@code null}).
    * @param osFamily
    *          the list of all parsed OS families (not {@code null}).
    * @return a stack manager instance which contains all parsed stacks.
    */
   StackManager create(@Assisted("stackRoot") File stackRoot,
       @Nullable @Assisted("commonServicesRoot") File commonServicesRoot,
+      @Assisted("extensionRoot") @Nullable File extensionRoot,
       OsFamily osFamily, boolean validate);
 }

+ 184 - 27
ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java

@@ -36,6 +36,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.stack.StackDefinitionDirectory;
 import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.ExtensionInfo;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.RepositoryInfo;
@@ -106,6 +107,11 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    */
   private Map<String, ServiceModule> serviceModules = new HashMap<String, ServiceModule>();
 
+  /**
+   * Map of linked extension modules keyed by extension name + version
+   */
+  private Map<String, ExtensionModule> extensionModules = new HashMap<String, ExtensionModule>();
+
   /**
    * Corresponding StackInfo instance
    */
@@ -148,6 +154,14 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     populateStackInfo();
   }
 
+  public Map<String, ServiceModule> getServiceModules() {
+    return serviceModules;
+  }
+
+  public Map<String, ExtensionModule> getExtensionModules() {
+    return extensionModules;
+  }
+
   /**
    * Fully resolve the stack. See stack resolution description in the class documentation.
    * If the stack has a parent, this stack will be merged against its fully resolved parent
@@ -160,20 +174,34 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    *                       have containing modules
    * @param allStacks      all stacks modules contained in the stack definition
    * @param commonServices all common services specified in the stack definition
+   * @param extensions     all extension modules contained in the stack definition
    *
    * @throws AmbariException if an exception occurs during stack resolution
    */
   @Override
   public void resolve(
-      StackModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      StackModule parentModule, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     moduleState = ModuleState.VISITED;
+    LOG.info("Resolve: {}:{}", stackInfo.getName(), stackInfo.getVersion());
     String parentVersion = stackInfo.getParentStackVersion();
-    mergeServicesWithExplicitParent(allStacks, commonServices);
+    mergeServicesWithExplicitParent(allStacks, commonServices, extensions);
+    addExtensionServices();
+
     // merge with parent version of same stack definition
     if (parentVersion != null) {
-      mergeStackWithParent(parentVersion, allStacks, commonServices);
+      mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
     }
+    for (ExtensionInfo extension : stackInfo.getExtensions()) {
+      String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
+      ExtensionModule extensionModule = extensions.get(extensionKey);
+      if (extensionModule == null) {
+        throw new AmbariException("Extension '" + stackInfo.getName() + ":" + stackInfo.getVersion() +
+                        "' specifies an extension " + extensionKey + " that doesn't exist");
+      }
+      mergeStackWithExtension(extensionModule, allStacks, commonServices, extensions);
+    }
+
     processUpgradePacks();
     processRepositories();
     processPropertyDependencies();
@@ -199,6 +227,12 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
   public void finalizeModule() {
     finalizeChildModules(serviceModules.values());
     finalizeChildModules(configurationModules.values());
+
+    // This needs to be merged during the finalize to avoid the RCO from services being inherited by the children stacks
+    // The RCOs from a service should only be inherited through the service.
+    for (ServiceModule module : serviceModules.values()) {
+      mergeRoleCommandOrder(module);
+    }
   }
 
   /**
@@ -220,7 +254,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @throws AmbariException if an exception occurs merging with the parent
    */
   private void mergeStackWithParent(
-      String parentVersion, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      String parentVersion, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
 
     String parentStackKey = stackInfo.getName() + StackManager.PATH_DELIMITER + parentVersion;
@@ -231,8 +265,8 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
           "' specifies a parent that doesn't exist");
     }
 
-    resolveStack(parentStack, allStacks, commonServices);
-    mergeConfigurations(parentStack, allStacks, commonServices);
+    resolveStack(parentStack, allStacks, commonServices, extensions);
+    mergeConfigurations(parentStack, allStacks, commonServices, extensions);
     mergeRoleCommandOrder(parentStack);
 
     if (stackInfo.getStackHooksFolder() == null) {
@@ -248,7 +282,22 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
     }
 
-    mergeServicesWithParent(parentStack, allStacks, commonServices);
+    mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
+  }
+
+  /**
+   * Merge the stack with one of its linked extensions.
+   *
+   * @param extension      the extension module to merge into this stack
+   * @param allStacks      all stacks in the stack definition
+   * @param commonServices all common services specified in the stack definition
+   * @param extensions     all extension modules in the stack definition
+   * @throws AmbariException if an exception occurs merging with the extension
+   */
+  private void mergeStackWithExtension(
+      ExtensionModule extension, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+
   }
 
   /**
@@ -261,11 +310,11 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @throws AmbariException if an exception occurs merging the child services with the parent stack
    */
   private void mergeServicesWithParent(
-      StackModule parentStack, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      StackModule parentStack, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     stackInfo.getServices().clear();
     Collection<ServiceModule> mergedModules = mergeChildModules(
-        allStacks, commonServices, serviceModules, parentStack.serviceModules);
+        allStacks, commonServices, extensions, serviceModules, parentStack.serviceModules);
     for (ServiceModule module : mergedModules) {
       serviceModules.put(module.getId(), module);
       stackInfo.getServices().add(module.getModuleInfo());
@@ -280,12 +329,12 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @throws AmbariException if an exception occurs while merging child services with their explicit parents
    */
   private void mergeServicesWithExplicitParent(
-      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
+      Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
     for (ServiceModule service : serviceModules.values()) {
       ServiceInfo serviceInfo = service.getModuleInfo();
       String parent = serviceInfo.getParent();
       if (parent != null) {
-        mergeServiceWithExplicitParent(service, parent, allStacks, commonServices);
+        mergeServiceWithExplicitParent(service, parent, allStacks, commonServices, extensions);
       }
     }
   }
@@ -301,12 +350,16 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    */
   private void mergeServiceWithExplicitParent(
       ServiceModule service, String parent, Map<String, StackModule> allStacks,
-      Map<String, ServiceModule> commonServices)
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
+
+    LOG.info("mergeServiceWithExplicitParent: {}", parent);
     if(isCommonServiceParent(parent)) {
-      mergeServiceWithCommonServiceParent(service, parent, allStacks,commonServices);
+      mergeServiceWithCommonServiceParent(service, parent, allStacks, commonServices, extensions);
+    } else if(isExtensionServiceParent(parent)) {
+      mergeServiceWithExtensionServiceParent(service, parent, allStacks, commonServices, extensions);
     } else {
-      mergeServiceWithStackServiceParent(service, parent, allStacks, commonServices);
+      mergeServiceWithStackServiceParent(service, parent, allStacks, commonServices, extensions);
     }
   }
 
@@ -321,6 +374,25 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
         && parent.split(StackManager.PATH_DELIMITER)[0].equalsIgnoreCase(StackManager.COMMON_SERVICES);
   }
 
+  /**
+   * Check if parent is extension service
+   * @param parent  Parent string
+   * @return true: if parent is extension service, false otherwise
+   */
+  private boolean isExtensionServiceParent(String parent) {
+    return parent != null
+        && !parent.isEmpty()
+        && parent.split(StackManager.PATH_DELIMITER)[0].equalsIgnoreCase(StackManager.EXTENSIONS);
+  }
+
+  private void addExtensionServices() throws AmbariException {
+    for (ExtensionModule extension : extensionModules.values()) {
+      stackInfo.getExtensions().add(extension.getModuleInfo());
+      Collection<ServiceModule> services = extension.getServiceModules().values();
+      addServices(services);
+    }
+  }
+
   /**
    * Merge a service with its explicitly specified common service as parent.
    * Parent: common-services/<serviceName>/<serviceVersion>
@@ -337,7 +409,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    */
   private void mergeServiceWithCommonServiceParent(
       ServiceModule service, String parent, Map<String, StackModule> allStacks,
-      Map<String, ServiceModule> commonServices)
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     ServiceInfo serviceInfo = service.getModuleInfo();
     String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
@@ -357,7 +429,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.addError(error);
     } else {
       if (baseService.isValid()) {
-        service.resolveExplicit(baseService, allStacks, commonServices);
+        service.resolveExplicit(baseService, allStacks, commonServices, extensions);
       } else {
         setValid(false);
         stackInfo.setValid(false);
@@ -367,6 +439,50 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     }
   }
 
+  /**
+   * Merge a service with its explicitly specified extension service as parent.
+   * Parent: extensions/<extensionName>/<extensionVersion>/<serviceName>
+   * Example:
+   *  Parent: extensions/EXT_TEST/1.0/CUSTOM_SERVICE
+   *
+   * @param service          the service to merge
+   * @param parent           the explicitly specified extension as parent
+   * @param allStacks        all stacks specified in the stack definition
+   * @param commonServices   all common services
+   * @param extensions       all extensions
+   * @throws AmbariException
+   */
+  private void mergeServiceWithExtensionServiceParent(
+      ServiceModule service, String parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+      throws AmbariException {
+    ServiceInfo serviceInfo = service.getModuleInfo();
+    String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
+    if(parentToks.length != 4 || !parentToks[0].equalsIgnoreCase(StackManager.EXTENSIONS)) {
+      throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+          + stackInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
+    }
+
+    String extensionKey = parentToks[1] + StackManager.PATH_DELIMITER + parentToks[2];
+    ExtensionModule extension = extensions.get(extensionKey);
+
+    if (extension == null || !extension.isValid()) {
+      setValid(false);
+      addError("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+          + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
+    } else {
+      resolveExtension(extension, allStacks, commonServices, extensions);
+      ServiceModule parentService = extension.getServiceModules().get(parentToks[3]);
+      if (parentService == null || !parentService.isValid()) {
+        setValid(false);
+        addError("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
+            + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
+      } else {
+        service.resolve(parentService, allStacks, commonServices, extensions);
+      }
+    }
+  }
+
   /**
    * Merge a service with its explicitly specified stack service as parent.
    * Parent: <stackName>/<stackVersion>/<serviceName>
@@ -378,16 +494,17 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @param service          the service to merge
    * @param parent           the explicitly specified stack service as parent
    * @param allStacks        all stacks specified in the stack definition
-   * @param commonServices   all common services specified in the stack definition
+   * @param commonServices   all common services
+   * @param extensions       all extensions
    * @throws AmbariException
    */
   private void mergeServiceWithStackServiceParent(
       ServiceModule service, String parent, Map<String, StackModule> allStacks,
-      Map<String, ServiceModule> commonServices)
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     ServiceInfo serviceInfo = service.getModuleInfo();
     String[] parentToks = parent.split(StackManager.PATH_DELIMITER);
-    if(parentToks.length != 3 || parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
+    if(parentToks.length != 3 || parentToks[0].equalsIgnoreCase(StackManager.EXTENSIONS) || parentToks[0].equalsIgnoreCase(StackManager.COMMON_SERVICES)) {
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
           + stackInfo.getVersion() + "' extends an invalid parent: '" + parent + "'");
     }
@@ -399,14 +516,14 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
           + stackInfo.getVersion() + "' extends a service in a non-existent stack: '" + baseStackKey + "'");
     }
 
-    resolveStack(baseStack, allStacks, commonServices);
+    resolveStack(baseStack, allStacks, commonServices, extensions);
 
     ServiceModule baseService = baseStack.serviceModules.get(parentToks[2]);
     if (baseService == null) {
       throw new AmbariException("The service '" + serviceInfo.getName() + "' in stack '" + stackInfo.getName() + ":"
           + stackInfo.getVersion() + "' extends a non-existent service: '" + parent + "'");
       }
-    service.resolveExplicit(baseService, allStacks, commonServices);
+    service.resolveExplicit(baseService, allStacks, commonServices, extensions);
   }
 
   /**
@@ -422,8 +539,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
         + ", stackName = " + stackInfo.getName()
         + ", stackVersion = " + stackInfo.getVersion());
 
-
-    //odo: give additional thought on handling missing metainfo.xml
+    //todo: give additional thought on handling missing metainfo.xml
     StackMetainfoXml smx = stackDirectory.getMetaInfoFile();
     if (smx != null) {
       if (!smx.isValid()) {
@@ -542,13 +658,13 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @param commonServices all common services specified in the stack definition
    */
   private void mergeConfigurations(
-      StackModule parent, Map<String,StackModule> allStacks, Map<String, ServiceModule> commonServices)
+      StackModule parent, Map<String,StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
       throws AmbariException {
     stackInfo.getProperties().clear();
     stackInfo.setAllConfigAttributes(new HashMap<String, Map<String, Map<String, String>>>());
 
     Collection<ConfigurationModule> mergedModules = mergeChildModules(
-        allStacks, commonServices, configurationModules, parent.configurationModules);
+        allStacks, commonServices, extensions, configurationModules, parent.configurationModules);
     for (ConfigurationModule module : mergedModules) {
       configurationModules.put(module.getId(), module);
       stackInfo.getProperties().addAll(module.getModuleInfo().getProperties());
@@ -565,10 +681,10 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
    * @throws AmbariException if unable to resolve the stack
    */
   private void resolveStack(
-          StackModule stackToBeResolved, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices)
+          StackModule stackToBeResolved, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
           throws AmbariException {
     if (stackToBeResolved.getModuleState() == ModuleState.INIT) {
-      stackToBeResolved.resolve(null, allStacks, commonServices);
+      stackToBeResolved.resolve(null, allStacks, commonServices, extensions);
     } else if (stackToBeResolved.getModuleState() == ModuleState.VISITED) {
       //todo: provide more information to user about cycle
       throw new AmbariException("Cycle detected while parsing stack definition");
@@ -581,6 +697,30 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     }
   }
 
+  /**
+   * Resolve an extension module.
+   *
+   * @param extension              extension module to be resolved
+   * @param allStacks              all stack modules in stack definition
+   * @param commonServices         all common services
+   * @param extensions             all extensions
+   * @throws AmbariException if unable to resolve the stack
+   */
+  private void resolveExtension(
+          ExtensionModule extension, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions)
+          throws AmbariException {
+    if (extension.getModuleState() == ModuleState.INIT) {
+      extension.resolve(null, allStacks, commonServices, extensions);
+    } else if (extension.getModuleState() == ModuleState.VISITED) {
+      //todo: provide more information to user about cycle
+      throw new AmbariException("Cycle detected while parsing extension definition");
+    }
+    if (!extension.isValid() || (extension.getModuleInfo() != null && !extension.getModuleInfo().isValid())) {
+      setValid(false);
+      addError("Stack includes an invalid extension: " + extension.getModuleInfo().getName());
+    }
+  }
+
   /**
    * Add a child service module to the stack.
    *
@@ -943,6 +1083,23 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     stackInfo.getRoleCommandOrder().merge(parentStack.stackInfo.getRoleCommandOrder());
   }
 
+  /**
+   * Merge role command order with the service
+   *
+   * @param service    service
+   */
+  private void mergeRoleCommandOrder(ServiceModule service) {
+    if (service.getModuleInfo().getRoleCommandOrder() == null)
+      return;
+
+    stackInfo.getRoleCommandOrder().merge(service.getModuleInfo().getRoleCommandOrder(), true);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Role Command Order for " + stackInfo.getName() + "-" + stackInfo.getVersion() +
+        " service " + service.getModuleInfo().getName());
+      stackInfo.getRoleCommandOrder().printRoleCommandOrder(LOG);
+    }
+  }
+
   @Override
   public boolean isValid() {
     return valid;

+ 2 - 4
ambari-server/src/main/java/org/apache/ambari/server/stack/StackServiceDirectory.java

@@ -67,14 +67,13 @@ public class StackServiceDirectory extends ServiceDirectory {
 
   @Override
   /**
-   * Parse stack service directory.
+   * Calculate the stack service directories.
    * packageDir Format: stacks/<stackName>/<stackVersion>/services/<serviceName>/package
    * Example:
    *  directory: "/var/lib/ambari-server/resources/stacks/HDP/2.0.6/services/HDFS"
    *  packageDir: "stacks/HDP/2.0.6/services/HDFS/package"
-   * @throws AmbariException if unable to parse the service directory
    */
-  protected void parsePath() throws AmbariException {
+  protected void calculateDirectories() {
     File serviceDir = new File(getAbsolutePath());
     File stackVersionDir = serviceDir.getParentFile().getParentFile();
     File stackDir = stackVersionDir.getParentFile();
@@ -116,6 +115,5 @@ public class StackServiceDirectory extends ServiceDirectory {
       LOG.debug("Service upgrades folder %s for service %s for stack %s does not exist.",
               absUpgradesDir, serviceDir.getName(), stackId);
     }
-    parseMetaInfoFile();
   }
 }

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java

@@ -82,7 +82,8 @@ public class ThemeModule extends BaseModule<ThemeModule, ThemeInfo> implements V
   }
 
   @Override
-  public void resolve(ThemeModule parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices) throws AmbariException {
+  public void resolve(ThemeModule parent, Map<String, StackModule> allStacks,
+      Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException {
     ThemeInfo parentModuleInfo = parent.getModuleInfo();
 
     if (parent.getModuleInfo() != null && !moduleInfo.isDeleted()) {

+ 160 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionId.java

@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state;
+
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.utils.VersionUtils;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionId implements Comparable<ExtensionId> {
+
+  private static final String NAME_SEPARATOR = "-";
+
+  private String extensionName;
+  private String extensionVersion;
+
+  public ExtensionId() {
+    extensionName = "";
+    extensionVersion = "";
+  }
+
+  public ExtensionId(String extensionId) {
+    parseExtensionIdHelper(this, extensionId);
+  }
+
+  public ExtensionId(ExtensionInfo extension) {
+    extensionName = extension.getName();
+    extensionVersion = extension.getVersion();
+  }
+
+  public ExtensionId(String extensionName, String extensionVersion) {
+    this(extensionName + NAME_SEPARATOR + extensionVersion); // NOTE(review): parse splits on the first '-', so an extensionName containing '-' will mis-parse — confirm names never contain '-'
+  }
+
+  public ExtensionId(ExtensionEntity entity) {
+    this(entity.getExtensionName(), entity.getExtensionVersion());
+  }
+
+  /**
+   * @return the extensionName
+   */
+  public String getExtensionName() {
+    return extensionName;
+  }
+
+  /**
+   * @return the extensionVersion
+   */
+  public String getExtensionVersion() {
+    return extensionVersion;
+  }
+
+  /**
+   * @return the extensionVersion
+   */
+  public String getExtensionId() {
+    if (extensionName.isEmpty()
+        && extensionVersion.isEmpty()) {
+      return "";
+    }
+    return extensionName + NAME_SEPARATOR + extensionVersion;
+  }
+
+  /**
+   * @param extensionId the extensionVersion to set
+   */
+  public void setExtensionId(String extensionId) {
+    parseExtensionIdHelper(this, extensionId);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean equals(Object object) {
+    if (!(object instanceof ExtensionId)) {
+      return false;
+    }
+    if (this == object) {
+      return true;
+    }
+    ExtensionId s = (ExtensionId) object;
+    return extensionName.equals(s.extensionName) && extensionVersion.equals(s.extensionVersion);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int hashCode() {
+    int result = extensionName != null ? extensionName.hashCode() : 0;
+    result = 31 * result + (extensionVersion != null ? extensionVersion.hashCode() : 0);
+    return result;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public int compareTo(ExtensionId other) {
+    if (this == other) {
+      return 0;
+    }
+
+    if (other == null) {
+      throw new RuntimeException("Cannot compare with a null value.");
+    }
+
+    int returnValue = getExtensionName().compareTo(other.getExtensionName());
+    if (returnValue == 0) {
+      returnValue = VersionUtils.compareVersions(getExtensionVersion(), other.getExtensionVersion());
+    } else {
+      throw new RuntimeException("ExtensionId with different names cannot be compared.");
+    }
+    return returnValue;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String toString() {
+    return getExtensionId();
+  }
+
+  private void parseExtensionIdHelper(ExtensionId extensionVersion, String extensionId) {
+    if (extensionId == null || extensionId.isEmpty()) {
+      extensionVersion.extensionName = "";
+      extensionVersion.extensionVersion = "";
+      return;
+    }
+
+    int pos = extensionId.indexOf(NAME_SEPARATOR);
+    if (pos == -1 || (extensionId.length() <= (pos + 1))) {
+      throw new RuntimeException("Could not parse invalid Extension Id" + ", extensionId=" + extensionId);
+    }
+
+    extensionVersion.extensionName = extensionId.substring(0, pos);
+    extensionVersion.extensionVersion = extensionId.substring(pos + 1);
+  }
+}

+ 208 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java

@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.ExtensionVersionResponse;
+import org.apache.ambari.server.stack.Validable;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+
+/**
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
+  private String name;
+  private String version;
+  private Collection<ServiceInfo> services;
+  private String parentExtensionVersion;
+
+  // Stack/extension prerequisites parsed from the extension's metainfo.xml.
+  private List<ExtensionMetainfoXml.Stack> stacks;
+  private List<ExtensionMetainfoXml.Extension> extensions;
+  private boolean valid = true;
+
+  /**
+   *
+   * @return valid xml flag
+   */
+  @Override
+  public boolean isValid() {
+    return valid;
+  }
+
+  /**
+   *
+   * @param valid set validity flag
+   */
+  @Override
+  public void setValid(boolean valid) {
+    this.valid = valid;
+  }
+
+  private Set<String> errorSet = new HashSet<String>();
+
+  @Override
+  public void addError(String error) {
+    errorSet.add(error);
+  }
+
+  @Override
+  public Collection<String> getErrors() {
+    return errorSet;
+  }
+
+  @Override
+  public void addErrors(Collection<String> errors) {
+    this.errorSet.addAll(errors);
+  }
+
+  // NOTE(review): neither field below is read or written anywhere in this
+  // class; both are candidates for removal unless accessed reflectively.
+  private String upgradesFolder = null;
+
+  private volatile Map<String, PropertyInfo> requiredProperties;
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public String getVersion() {
+    return version;
+  }
+
+  public void setVersion(String version) {
+    this.version = version;
+  }
+
+  /**
+   * @return the services provided by this extension; never null (lazily
+   *         initialized to an empty, mutable list)
+   */
+  public synchronized Collection<ServiceInfo> getServices() {
+    if (services == null) services = new ArrayList<ServiceInfo>();
+    return services;
+  }
+
+  /**
+   * Looks up one of this extension's services by name.
+   *
+   * @param name service name to find
+   * @return the matching service, or null when this extension has none
+   */
+  public ServiceInfo getService(String name) {
+    for (ServiceInfo service : getServices()) {
+      if (service.getName().equals(name)) {
+        return service;
+      }
+    }
+    //todo: exception?
+    return null;
+  }
+
+  public synchronized void setServices(Collection<ServiceInfo> services) {
+    this.services = services;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Extension name:" + name + "\nversion:" +
+      version + " \nvalid:" + isValid());
+    if (services != null) {
+      sb.append("\n\t\tService:");
+      for (ServiceInfo service : services) {
+        sb.append("\t\t");
+        sb.append(service);
+      }
+    }
+
+    return sb.toString();
+  }
+
+  /**
+   * Hash of (name, version) -- the same fields equals() compares.
+   */
+  @Override
+  public int hashCode() {
+    // 31 multiplier makes the hash order-sensitive, so ("a","b") and
+    // ("b","a") no longer collide as they did with plain addition.
+    return 31 * name.hashCode() + version.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof ExtensionInfo)) {
+      return false;
+    }
+    if (this == obj) {
+      return true;
+    }
+    ExtensionInfo extInfo = (ExtensionInfo) obj;
+    return getName().equals(extInfo.getName()) && getVersion().equals(extInfo.getVersion());
+  }
+
+  /**
+   * Builds the REST response object describing this extension version.
+   *
+   * NOTE(review): the original also collected the services' Kerberos
+   * descriptor files into a local set that was never used; that dead code
+   * has been removed.
+   */
+  public ExtensionVersionResponse convertToResponse() {
+    return new ExtensionVersionResponse(getVersion(), getParentExtensionVersion(),
+                                        isValid(), getErrors());
+  }
+
+  public String getParentExtensionVersion() {
+    return parentExtensionVersion;
+  }
+
+  public void setParentExtensionVersion(String parentExtensionVersion) {
+    this.parentExtensionVersion = parentExtensionVersion;
+  }
+
+  /**
+   * Orders extensions by the combined "name-version" string.
+   *
+   * NOTE(review): version ordering is therefore lexicographic
+   * ("10" sorts before "9"), unlike ExtensionId which compares versions
+   * numerically via VersionUtils -- confirm this is intentional.
+   */
+  @Override
+  public int compareTo(ExtensionInfo o) {
+    String myId = name + "-" + version;
+    String oId = o.name + "-" + o.version;
+    return myId.compareTo(oId);
+  }
+
+  public List<ExtensionMetainfoXml.Stack> getStacks() {
+    return stacks;
+  }
+
+  public void setStacks(List<ExtensionMetainfoXml.Stack> stacks) {
+    this.stacks = stacks;
+  }
+
+  public List<ExtensionMetainfoXml.Extension> getExtensions() {
+    return extensions;
+  }
+
+  public void setExtensions(List<ExtensionMetainfoXml.Extension> extensions) {
+    this.extensions = extensions;
+  }
+}

+ 11 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java

@@ -27,6 +27,7 @@ import com.google.common.collect.Multimaps;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.stack.Validable;
 import org.apache.ambari.server.state.stack.MetricDefinition;
+import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.map.annotate.JsonFilter;
 
@@ -145,6 +146,8 @@ public class ServiceInfo implements Validable{
   @XmlTransient
   private File widgetsDescriptorFile = null;
 
+  private StackRoleCommandOrder roleCommandOrder;
+
   @XmlTransient
   private boolean valid = true;
 
@@ -730,6 +733,14 @@ public String getVersion() {
     this.widgetsDescriptorFile = widgetsDescriptorFile;
   }
 
+  /**
+   * @return the role command order attached to this service, or null if
+   *         none was set (presumably a per-service override of the
+   *         stack-level RCO -- confirm with callers)
+   */
+  public StackRoleCommandOrder getRoleCommandOrder() {
+    return roleCommandOrder;
+  }
+
+  public void setRoleCommandOrder(StackRoleCommandOrder roleCommandOrder) {
+    this.roleCommandOrder = roleCommandOrder;
+  }
+
+
   /**
    * @return config types this service contains configuration for, but which are primarily related to another service
    */

+ 29 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java

@@ -49,6 +49,7 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
   private String widgetsDescriptorFileLocation;
   private List<RepositoryInfo> repositories;
   private Collection<ServiceInfo> services;
+  private Collection<ExtensionInfo> extensions;
   private String parentStackVersion;
   // stack-level properties
   private List<PropertyInfo> properties;
@@ -161,6 +162,34 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
     this.services = services;
   }
 
+  /**
+   * @return the extensions linked to this stack; never null (lazily
+   *         initialized to an empty, mutable list)
+   */
+  public synchronized Collection<ExtensionInfo> getExtensions() {
+    if (extensions == null) extensions = new ArrayList<ExtensionInfo>();
+    return extensions;
+  }
+
+  /**
+   * Finds a linked extension by name.
+   *
+   * @param name extension name to look up
+   * @return the matching extension, or null when no linked extension has
+   *         that name
+   */
+  public ExtensionInfo getExtension(String name) {
+    Collection<ExtensionInfo> extensions = getExtensions();
+    for (ExtensionInfo extension : extensions) {
+      if (extension.getName().equals(name)) {
+        return extension;
+      }
+    }
+    //todo: exception?
+    return null;
+  }
+
+  /**
+   * Finds the linked extension that provides the named service.
+   *
+   * BUG FIX: the original inner loop iterated this stack's own
+   * {@code services} field, so the FIRST linked extension was returned
+   * whenever the stack itself contained the service -- regardless of
+   * which extension (if any) actually provides it (and it would NPE if
+   * the stack's service list was null). It now searches each extension's
+   * own service list.
+   *
+   * @param serviceName name of the service to look up
+   * @return the extension containing the service, or null if no linked
+   *         extension provides it
+   */
+  public ExtensionInfo getExtensionByService(String serviceName) {
+    for (ExtensionInfo extension : getExtensions()) {
+      for (ServiceInfo service : extension.getServices()) {
+        if (service.getName().equals(serviceName)) {
+          return extension;
+        }
+      }
+    }
+    //todo: exception?
+    return null;
+  }
+
   public List<PropertyInfo> getProperties() {
     if (properties == null) properties = new ArrayList<PropertyInfo>();
     return properties;

+ 204 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java

@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.stack;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlElements;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
+
+import org.apache.ambari.server.stack.Validable;
+
+/**
+ * Represents the extension <code>metainfo.xml</code> file.
+ *
+ * An extension version is like a stack version but it contains custom services.  Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+@XmlRootElement(name="metainfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ExtensionMetainfoXml implements Validable{
+
+  @XmlElement(name="extends")
+  private String extendsVersion = null;
+
+  @XmlElement(name="versions")
+  private Version version = new Version();
+
+  @XmlElement(name="prerequisites")
+  private Prerequisites prerequisites = new Prerequisites();
+
+  /**
+   * The <prerequisites> element: minimum stack and extension versions this
+   * extension requires. Instantiated only by JAXB (private constructor).
+   */
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Prerequisites {
+    private Prerequisites() {
+    }
+    @XmlElementWrapper(name="min-stack-versions")
+    @XmlElements(@XmlElement(name="stack"))
+    private List<Stack> stacks = new ArrayList<Stack>();
+
+    @XmlElementWrapper(name="min-extension-versions")
+    @XmlElements(@XmlElement(name="extension"))
+    private List<Extension> extensions = new ArrayList<Extension>();
+
+    public List<Stack> getStacks() {
+      return stacks;
+    }
+
+    public List<Extension> getExtensions() {
+      return extensions;
+    }
+  }
+
+  @XmlTransient
+  private boolean valid = true;
+
+  /**
+   *
+   * @return valid xml flag
+   */
+  @Override
+  public boolean isValid() {
+    return valid;
+  }
+
+  /**
+   *
+   * @param valid set validity flag
+   */
+  @Override
+  public void setValid(boolean valid) {
+    this.valid = valid;
+  }
+
+  @XmlTransient
+  private Set<String> errorSet = new HashSet<String>();
+
+  @Override
+  public void addError(String error) {
+    errorSet.add(error);
+  }
+
+  @Override
+  public Collection<String> getErrors() {
+    return errorSet;
+  }
+
+  @Override
+  public void addErrors(Collection<String> errors) {
+    this.errorSet.addAll(errors);
+  }
+
+  /**
+   * @return the version of the parent extension this extension extends
+   *         (the <extends> element), or null if it extends nothing
+   */
+  public String getExtends() {
+    return extendsVersion;
+  }
+
+  /**
+   * @return the version metadata (the <versions> element)
+   */
+  public Version getVersion() {
+    return version;
+  }
+
+  public List<Stack> getStacks() {
+    return prerequisites.getStacks();
+  }
+
+  public List<Extension> getExtensions() {
+    return prerequisites.getExtensions();
+  }
+
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Version {
+    private Version() {
+    }
+    private boolean active = false;
+    private String upgrade = null;
+
+    /**
+     * @return <code>true</code> if the extension version is active
+     */
+    public boolean isActive() {
+      return active;
+    }
+
+    /**
+     * @return the upgrade version number, if set
+     */
+    public String getUpgrade() {
+      return upgrade;
+    }
+  }
+
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Stack {
+    private Stack() {
+    }
+    private String name = null;
+    private String version = null;
+
+    /**
+     * @return the stack name
+     */
+    public String getName() {
+      return name;
+    }
+
+    /**
+     * @return the stack version, this may be something like 1.0.*
+     */
+    public String getVersion() {
+      return version;
+    }
+  }
+
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Extension {
+    private Extension() {
+    }
+    private String name = null;
+    private String version = null;
+
+    /**
+     * @return the extension name
+     */
+    public String getName() {
+      return name;
+    }
+
+    /**
+     * @return the extension version, this may be something like 1.0.*
+     */
+    public String getVersion() {
+      return version;
+    }
+  }
+
+}

+ 4 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/stack/ServiceMetainfoXml.java

@@ -86,6 +86,10 @@ public class ServiceMetainfoXml implements Validable{
   public List<ServiceInfo> getServices() {
     return services;
   }
+
+  // Replaces the parsed service list -- presumably added so extension
+  // services can be injected after unmarshalling; confirm with callers.
+  public void setServices(List<ServiceInfo> services) {
+    this.services = services;
+  }
   
   public String getSchemaVersion() {
     return schemaVersion;

+ 50 - 3
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java

@@ -161,7 +161,6 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   protected static final String PHOENIX_QUERY_SERVER_PRINCIPAL_KEY = "phoenix.queryserver.kerberos.principal";
   protected static final String PHOENIX_QUERY_SERVER_KEYTAB_KEY = "phoenix.queryserver.keytab.file";
 
-
   private static final String OOZIE_ENV_CONFIG = "oozie-env";
   private static final String HIVE_ENV_CONFIG = "hive-env";
   private static final String AMS_SITE = "ams-site";
@@ -174,6 +173,10 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   protected static final String HBASE_SITE_CONFIG = "hbase-site";
   protected static final String HBASE_SPNEGO_PRINCIPAL_KEY = "hbase.security.authentication.spnego.kerberos.principal";
   protected static final String HBASE_SPNEGO_KEYTAB_KEY = "hbase.security.authentication.spnego.kerberos.keytab";
+  protected static final String EXTENSION_TABLE = "extension";
+  protected static final String EXTENSION_ID_COLUMN = "extension_id";
+  protected static final String EXTENSION_LINK_TABLE = "extensionlink";
+  protected static final String EXTENSION_LINK_ID_COLUMN = "link_id";
 
   private static final Map<String, Integer> ROLE_ORDER;
 
@@ -265,6 +268,8 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
   protected void executeDDLUpdates() throws AmbariException, SQLException {
     updateAdminPermissionTable();
     updateServiceComponentDesiredStateTable();
+    createExtensionTable();
+    createExtensionLinkTable();
     createSettingTable();
     updateRepoVersionTableDDL();
     updateServiceComponentDesiredStateTableDDL();
@@ -292,7 +297,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     columns.add(new DBColumnInfo("username", String.class, 255, null, false));
     columns.add(new DBColumnInfo("password", String.class, 255, null, false));
     dbAccessor.createTable(REMOTE_AMBARI_CLUSTER_TABLE, columns, CLUSTER_ID);
-    dbAccessor.addUniqueConstraint(REMOTE_AMBARI_CLUSTER_TABLE , "unq_remote_ambari_cluster" , CLUSTER_NAME);
+    dbAccessor.addUniqueConstraint(REMOTE_AMBARI_CLUSTER_TABLE , "UQ_remote_ambari_cluster" , CLUSTER_NAME);
     addSequence("remote_cluster_id_seq", 1L, false);
 
     List<DBColumnInfo> remoteClusterServiceColumns = new ArrayList<>();
@@ -465,6 +470,48 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     }
   }
 
+  /**
+   * Upgrade-path counterpart of the "extension" table added to the CREATE
+   * DDL scripts: (extension_id PK, extension_name, extension_version) with
+   * a UQ_extension unique constraint on (name, version). Also seeds
+   * extension_id_seq at 0 to match the DDL files.
+   *
+   * @throws SQLException on any DDL failure
+   */
+  private void createExtensionTable() throws SQLException {
+    List<DBColumnInfo> columns = new ArrayList<>();
+
+    // Add extension table
+    LOG.info("Creating " + EXTENSION_TABLE + " table");
+
+    columns.add(new DBColumnInfo(EXTENSION_ID_COLUMN, Long.class, null, null, false));
+    columns.add(new DBColumnInfo("extension_name", String.class, 255, null, false));
+    columns.add(new DBColumnInfo("extension_version", String.class, 255, null, false));
+    dbAccessor.createTable(EXTENSION_TABLE, columns, EXTENSION_ID_COLUMN);
+
+    // create UNIQUE constraint, ensuring column order matches SQL files
+    String[] uniqueColumns = new String[] { "extension_name", "extension_version" };
+    dbAccessor.addUniqueConstraint(EXTENSION_TABLE, "UQ_extension", uniqueColumns);
+
+    addSequence("extension_id_seq", 0L, false);
+  }
+
+  /**
+   * Upgrade-path counterpart of the "extensionlink" table in the CREATE
+   * DDL scripts: (link_id PK, stack_id FK to stack, extension_id FK to
+   * extension) with UQ_extension_link on (stack_id, extension_id). Seeds
+   * link_id_seq at 0 to match the DDL files. Must run after
+   * createExtensionTable() so the FK target exists.
+   *
+   * @throws SQLException on any DDL failure
+   */
+  private void createExtensionLinkTable() throws SQLException {
+    List<DBColumnInfo> columns = new ArrayList<>();
+
+    // Add extension link table
+    LOG.info("Creating " + EXTENSION_LINK_TABLE + " table");
+
+    columns.add(new DBColumnInfo(EXTENSION_LINK_ID_COLUMN, Long.class, null, null, false));
+    columns.add(new DBColumnInfo("stack_id", Long.class, null, null, false));
+    columns.add(new DBColumnInfo(EXTENSION_ID_COLUMN, Long.class, null, null, false));
+    dbAccessor.createTable(EXTENSION_LINK_TABLE, columns, EXTENSION_LINK_ID_COLUMN);
+
+    // create UNIQUE constraint, ensuring column order matches SQL files
+    String[] uniqueColumns = new String[] { "stack_id", EXTENSION_ID_COLUMN };
+    dbAccessor.addUniqueConstraint(EXTENSION_LINK_TABLE, "UQ_extension_link", uniqueColumns);
+
+    dbAccessor.addFKConstraint(EXTENSION_LINK_TABLE, "FK_extensionlink_extension_id",
+      EXTENSION_ID_COLUMN, EXTENSION_TABLE, EXTENSION_ID_COLUMN, false);
+
+    dbAccessor.addFKConstraint(EXTENSION_LINK_TABLE, "FK_extensionlink_stack_id",
+      "stack_id", STACK_TABLE, "stack_id", false);
+
+    addSequence("link_id_seq", 0L, false);
+  }
+
   private void createSettingTable() throws SQLException {
     List<DBColumnInfo> columns = new ArrayList<>();
 
@@ -1429,7 +1476,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
     // create UNIQUE constraint, ensuring column order matches SQL files
     String[] uniqueColumns = new String[] { "component_name", "service_name", "cluster_id" };
-    dbAccessor.addUniqueConstraint(SERVICE_COMPONENT_DS_TABLE, "unq_scdesiredstate_name",
+    dbAccessor.addUniqueConstraint(SERVICE_COMPONENT_DS_TABLE, "UQ_scdesiredstate_name",
         uniqueColumns);
 
     // add FKs back to SCDS in both HCDS and HCS tables

+ 23 - 3
ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql

@@ -22,7 +22,23 @@ CREATE TABLE stack(
   stack_name VARCHAR(255) NOT NULL,
   stack_version VARCHAR(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE(extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE(stack_id, extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id INTEGER NOT NULL,
@@ -176,7 +192,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -789,7 +805,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id BIGINT NOT NULL,
@@ -1092,6 +1108,10 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value)
   union all
   select 'stack_id_seq', 0 FROM SYSIBM.SYSDUMMY1
   union all
+  select 'extension_id_seq', 0 FROM SYSIBM.SYSDUMMY1
+  union all
+  select 'link_id_seq', 0 FROM SYSIBM.SYSDUMMY1
+  union all
   select 'topology_host_info_id_seq', 0 FROM SYSIBM.SYSDUMMY1
   union all
   select 'topology_host_request_id_seq', 0 FROM SYSIBM.SYSDUMMY1

+ 21 - 3
ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql

@@ -32,7 +32,23 @@ CREATE TABLE stack(
   stack_name VARCHAR(100) NOT NULL,
   stack_version VARCHAR(100) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id INTEGER NOT NULL,
@@ -176,7 +192,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR(100) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -796,7 +812,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id BIGINT NOT NULL,
@@ -1061,6 +1077,8 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) VALUES
   ('upgrade_group_id_seq', 0),
   ('upgrade_item_id_seq', 0),
   ('stack_id_seq', 0),
+  ('extension_id_seq', 0),
+  ('link_id_seq', 0),
   ('widget_id_seq', 0),
   ('widget_layout_id_seq', 0),
   ('topology_host_info_id_seq', 0),

+ 21 - 3
ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql

@@ -22,7 +22,23 @@ CREATE TABLE stack(
   stack_name VARCHAR2(255) NOT NULL,
   stack_version VARCHAR2(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id NUMERIC(19) NOT NULL,
+  extension_name VARCHAR2(255) NOT NULL,
+  extension_version VARCHAR2(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE(extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id NUMERIC(19) NOT NULL,
+  stack_id NUMERIC(19) NOT NULL,
+  extension_id NUMERIC(19) NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE(stack_id, extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id NUMBER(10) NOT NULL,
@@ -167,7 +183,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR2(255) NOT NULL,
   recovery_enabled SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -786,7 +802,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id NUMBER(19) NOT NULL,
@@ -1051,6 +1067,8 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_id_
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_group_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_item_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('stack_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('extension_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('link_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_layout_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_info_id_seq', 0);

+ 21 - 3
ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql

@@ -22,7 +22,23 @@ CREATE TABLE stack(
   stack_name VARCHAR(255) NOT NULL,
   stack_version VARCHAR(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE(extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE(stack_id, extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id INTEGER NOT NULL,
@@ -176,7 +192,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -788,7 +804,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id BIGINT NOT NULL,
@@ -1054,6 +1070,8 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) VALUES
   ('widget_layout_id_seq', 0),
   ('upgrade_item_id_seq', 0),
   ('stack_id_seq', 0),
+  ('extension_id_seq', 0),
+  ('link_id_seq', 0),
   ('topology_host_info_id_seq', 0),
   ('topology_host_request_id_seq', 0),
   ('topology_host_task_id_seq', 0),

+ 23 - 3
ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql

@@ -33,10 +33,28 @@ CREATE TABLE ambari.stack(
   stack_name VARCHAR(255) NOT NULL,
   stack_version VARCHAR(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version)
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)
 );
 GRANT ALL PRIVILEGES ON TABLE ambari.stack TO :username;
 
+CREATE TABLE ambari.extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE(extension_name, extension_version));
+GRANT ALL PRIVILEGES ON TABLE ambari.extension TO :username;
+
+CREATE TABLE ambari.extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES ambari.stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES ambari.extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE(stack_id, extension_id));
+GRANT ALL PRIVILEGES ON TABLE ambari.extensionlink TO :username;
+
 CREATE TABLE ambari.adminresourcetype (
   resource_type_id INTEGER NOT NULL,
   resource_type_name VARCHAR(255) NOT NULL,
@@ -215,7 +233,7 @@ CREATE TABLE ambari.servicecomponentdesiredstate (
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES ambari.stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES ambari.clusterservices (service_name, cluster_id)
 );
@@ -924,7 +942,7 @@ CREATE TABLE ambari.remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name)
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name)
 );
 GRANT ALL PRIVILEGES ON TABLE ambari.remoteambaricluster TO :username;
 
@@ -1216,6 +1234,8 @@ INSERT INTO ambari.ambari_sequences (sequence_name, sequence_value) VALUES
   ('widget_layout_id_seq', 0),
   ('upgrade_item_id_seq', 0),
   ('stack_id_seq', 0),
+  ('extension_id_seq', 0),
+  ('link_id_seq', 0),
   ('topology_host_info_id_seq', 0),
   ('topology_host_request_id_seq', 0),
   ('topology_host_task_id_seq', 0),

+ 21 - 3
ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql

@@ -21,7 +21,23 @@ CREATE TABLE stack(
   stack_name VARCHAR(255) NOT NULL,
   stack_version VARCHAR(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id NUMERIC(19) NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY (extension_id),
+  CONSTRAINT UQ_extension UNIQUE(extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id NUMERIC(19) NOT NULL,
+  stack_id NUMERIC(19) NOT NULL,
+  extension_id NUMERIC(19) NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE(stack_id, extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id INTEGER NOT NULL,
@@ -165,7 +181,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -785,7 +801,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id NUMERIC(19) NOT NULL,
@@ -1050,6 +1066,8 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_id_
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_group_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('upgrade_item_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('stack_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('extension_id_seq', 0);
+INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('link_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('widget_layout_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('topology_host_info_id_seq', 0);

+ 21 - 3
ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql

@@ -34,7 +34,23 @@ CREATE TABLE stack(
   stack_name VARCHAR(255) NOT NULL,
   stack_version VARCHAR(255) NOT NULL,
   CONSTRAINT PK_stack PRIMARY KEY CLUSTERED (stack_id),
-  CONSTRAINT unq_stack UNIQUE (stack_name, stack_version));
+  CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version));
+
+CREATE TABLE extension(
+  extension_id BIGINT NOT NULL,
+  extension_name VARCHAR(255) NOT NULL,
+  extension_version VARCHAR(255) NOT NULL,
+  CONSTRAINT PK_extension PRIMARY KEY CLUSTERED (extension_id),
+  CONSTRAINT UQ_extension UNIQUE (extension_name, extension_version));
+
+CREATE TABLE extensionlink(
+  link_id BIGINT NOT NULL,
+  stack_id BIGINT NOT NULL,
+  extension_id BIGINT NOT NULL,
+  CONSTRAINT PK_extensionlink PRIMARY KEY CLUSTERED (link_id),
+  CONSTRAINT FK_extensionlink_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_extensionlink_extension_id FOREIGN KEY (extension_id) REFERENCES extension(extension_id),
+  CONSTRAINT UQ_extension_link UNIQUE (stack_id, extension_id));
 
 CREATE TABLE adminresourcetype (
   resource_type_id INTEGER NOT NULL,
@@ -189,7 +205,7 @@ CREATE TABLE servicecomponentdesiredstate (
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
-  CONSTRAINT unq_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
+  CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
@@ -806,7 +822,7 @@ CREATE TABLE remoteambaricluster(
   url VARCHAR(255) NOT NULL,
   password VARCHAR(255) NOT NULL,
   CONSTRAINT PK_remote_ambari_cluster PRIMARY KEY (cluster_id),
-  CONSTRAINT unq_remote_ambari_cluster UNIQUE (name));
+  CONSTRAINT UQ_remote_ambari_cluster UNIQUE (name));
 
 CREATE TABLE remoteambariclusterservice(
   id BIGINT NOT NULL,
@@ -1078,6 +1094,8 @@ BEGIN TRANSACTION
     ('widget_layout_id_seq', 0),
     ('upgrade_item_id_seq', 0),
     ('stack_id_seq', 0),
+    ('extension_id_seq', 0),
+    ('link_id_seq', 0),
     ('topology_host_info_id_seq', 0),
     ('topology_host_request_id_seq', 0),
     ('topology_host_task_id_seq', 0),

+ 2 - 0
ambari-server/src/main/resources/META-INF/persistence.xml

@@ -33,6 +33,8 @@
     <class>org.apache.ambari.server.orm.entities.ConfigGroupEntity</class>
     <class>org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity</class>
     <class>org.apache.ambari.server.orm.entities.ExecutionCommandEntity</class>
+    <class>org.apache.ambari.server.orm.entities.ExtensionEntity</class>
+    <class>org.apache.ambari.server.orm.entities.ExtensionLinkEntity</class>
     <class>org.apache.ambari.server.orm.entities.GroupEntity</class>
     <class>org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity</class>
     <class>org.apache.ambari.server.orm.entities.HostComponentStateEntity</class>

+ 31 - 0
ambari-server/src/main/resources/extensions/README.txt

@@ -0,0 +1,31 @@
+Extensions should include a folder with the extension name.
+Subfolders of the extension name folder represent different
+extension versions.
+
+For a sample extension MY_EXT 1.0, you would create subfolders: MY_EXT/1.0
+
+Within each extension version folder, there should be both a metainfo.xml
+file and a services folder.  The metainfo.xml should contain the
+stack versions with which the extension version is compatible.
+
+For example the following metainfo.xml shows an extension that is
+compatible with both HDP 2.4 and HDP 2.5:
+
+<metainfo>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>2.4</version>
+      </stack>
+      <stack>
+        <name>HDP</name>
+        <version>2.5</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>
+
+The services folder will contain all services that are part of the
+extension version.  The contents of those service folders will be the
+same as what you would find under a stack version's services folder.

+ 15 - 1
ambari-server/src/main/resources/key_properties.json

@@ -79,13 +79,27 @@
     "StackService": "StackServiceComponents/service_name",
     "StackServiceComponent": "StackServiceComponents/component_name"
   },
-    "StackServiceComponentDependency": {
+  "StackServiceComponentDependency": {
     "Stack": "Dependencies/stack_name",
     "StackVersion": "Dependencies/stack_version",
     "StackService": "Dependencies/dependent_service_name",
     "StackServiceComponent": "Dependencies/dependent_component_name",
     "StackServiceComponentDependency": "Dependencies/component_name"
   },
+  "ExtensionLink": {
+    "ExtensionLink": "ExtensionLink/link_id",
+    "Stack": "ExtensionLink/stack_name",
+    "StackVersion": "ExtensionLink/stack_version",
+    "Extension": "ExtensionLink/extension_name",
+    "ExtensionVersion": "ExtensionLink/extension_version"
+  },
+  "Extension": {
+    "Extension": "Extensions/extension_name"
+  },
+  "ExtensionVersion": {
+    "Extension": "Versions/extension_name",
+    "ExtensionVersion": "Versions/extension_version"
+  },
   "DRFeed": {
     "DRFeed": "Feed/name"
   },

+ 25 - 5
ambari-server/src/main/resources/properties.json

@@ -265,6 +265,26 @@
         "auto_deploy/enabled",
         "auto_deploy/location"
     ],
+    "ExtensionLink": [
+        "ExtensionLink/link_id",
+        "ExtensionLink/stack_name",
+        "ExtensionLink/stack_version",
+        "ExtensionLink/extension_name",
+        "ExtensionLink/extension_version",
+        "_"
+    ],
+    "Extension":[
+        "Extensions/extension_name",
+        "_"
+    ],
+    "ExtensionVersion":[
+        "Versions/extension_name",
+        "Versions/extension_version",
+        "Versions/valid",
+        "Versions/extension-errors",
+        "Versions/parent_extension_version",
+        "_"
+    ],
     "DRFeed":[
         "Feed/name",
         "Feed/description",
@@ -469,10 +489,10 @@
         "StackConfigurationDependency/dependency_name",
         "_"
     ],
-  "KerberosDescriptor":[
-    "KerberosDescriptors/kerberos_descriptor_name",
-    "KerberosDescriptors/kerberos_descriptor_text",
-    "_"
-  ]
+    "KerberosDescriptor":[
+        "KerberosDescriptors/kerberos_descriptor_name",
+        "KerberosDescriptors/kerberos_descriptor_text",
+        "_"
+    ]
 
 }

+ 119 - 0
ambari-server/src/test/java/org/apache/ambari/server/api/services/ExtensionsServiceTest.java

@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import org.apache.ambari.server.api.resources.ResourceInstance;
+import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
+import org.apache.ambari.server.api.services.serializers.ResultSerializer;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+
+import javax.ws.rs.PathParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.lang.reflect.Method;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.notNull;
+import static org.easymock.EasyMock.same;
+import static org.junit.Assert.assertEquals;
+
+/**
+* Unit tests for ExtensionsService.
+*/
+public class ExtensionsServiceTest extends BaseServiceTest {
+
+  @Override
+  public List<ServiceTestInvocation> getTestInvocations() throws Exception {
+    List<ServiceTestInvocation> listInvocations = new ArrayList<ServiceTestInvocation>();
+
+    // getExtension
+    ExtensionsService service = new TestExtensionsService("extensionName", null);
+    Method m = service.getClass().getMethod("getExtension", String.class, HttpHeaders.class, UriInfo.class, String.class);
+    Object[] args = new Object[] {null, getHttpHeaders(), getUriInfo(), "extensionName"};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null));
+
+    //getExtensions
+    service = new TestExtensionsService(null, null);
+    m = service.getClass().getMethod("getExtensions", String.class, HttpHeaders.class, UriInfo.class);
+    args = new Object[] {null, getHttpHeaders(), getUriInfo()};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null));
+
+    // getExtensionVersion
+    service = new TestExtensionsService("extensionName", "extensionVersion");
+    m = service.getClass().getMethod("getExtensionVersion", String.class, HttpHeaders.class, UriInfo.class, String.class, String.class);
+    args = new Object[] {null, getHttpHeaders(), getUriInfo(), "extensionName", "extensionVersion"};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null));
+
+    // getExtensionVersions
+    service = new TestExtensionsService("extensionName", null);
+    m = service.getClass().getMethod("getExtensionVersions", String.class, HttpHeaders.class, UriInfo.class, String.class);
+    args = new Object[] {null, getHttpHeaders(), getUriInfo(), "extensionName"};
+    listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null));
+
+    return listInvocations;
+  }
+
+  private class TestExtensionsService extends ExtensionsService {
+
+    private String m_extensionId;
+    private String m_extensionVersion;
+
+    private TestExtensionsService(String extensionName, String extensionVersion) {
+      m_extensionId = extensionName;
+      m_extensionVersion = extensionVersion;
+    }
+
+    @Override
+    ResourceInstance createExtensionResource(String extensionName) {
+      assertEquals(m_extensionId, extensionName);
+      return getTestResource();
+    }
+
+    @Override
+    ResourceInstance createExtensionVersionResource(String extensionName, String extensionVersion) {
+      assertEquals(m_extensionId, extensionName);
+      assertEquals(m_extensionVersion, extensionVersion);
+      return getTestResource();
+    }
+
+    @Override
+    RequestFactory getRequestFactory() {
+      return getTestRequestFactory();
+    }
+
+    @Override
+    protected RequestBodyParser getBodyParser() {
+      return getTestBodyParser();
+    }
+
+    @Override
+    protected ResultSerializer getResultSerializer() {
+      return getTestResultSerializer();
+    }
+  }
+}

+ 91 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExtensionResourceProviderTest.java

@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExtensionRequest;
+import org.apache.ambari.server.controller.ExtensionResponse;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+/**
+ * ExtensionResourceProvider Test
+ */
+public class ExtensionResourceProviderTest {
+  @Test
+  public void testGetResources() throws Exception {
+    Resource.Type type = Resource.Type.Extension;
+
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+
+    Set<ExtensionResponse> allResponse = new HashSet<ExtensionResponse>();
+    allResponse.add(new ExtensionResponse("Extension1"));
+    allResponse.add(new ExtensionResponse("Extension2"));
+
+    // set expectations
+    expect(managementController.getExtensions(EasyMock.<Set<ExtensionRequest>>anyObject())).andReturn(allResponse).once();
+
+    // replay
+    replay(managementController);
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController);
+
+    Set<String> propertyIds = new HashSet<String>();
+
+    propertyIds.add(ExtensionResourceProvider.EXTENSION_NAME_PROPERTY_ID);
+
+    // create the request
+    Request request = PropertyHelper.getReadRequest(propertyIds);
+
+    // get all ... no predicate
+    Set<Resource> resources = provider.getResources(request, null);
+
+    Assert.assertEquals(2, resources.size());
+
+
+    Set<String> extensionNames = new HashSet<String>();
+    extensionNames.add("Extension1");
+    extensionNames.add("Extension2");
+
+    for (Resource resource : resources) {
+      String extensionName = (String) resource.getPropertyValue(ExtensionResourceProvider.EXTENSION_NAME_PROPERTY_ID);
+      Assert.assertTrue(extensionNames.contains(extensionName));
+    }
+
+    // verify
+    verify(managementController);
+  }
+}

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/stack/ComponentModuleTest.java

@@ -495,7 +495,7 @@ public class ComponentModuleTest {
     ComponentModule component = new ComponentModule(info);
     ComponentModule parentComponent = new ComponentModule(parentInfo);
 
-    component.resolve(parentComponent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap());
+    component.resolve(parentComponent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap(), Collections.<String, ExtensionModule>emptyMap());
 
     return component;
   }

+ 2 - 2
ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java

@@ -118,11 +118,11 @@ public class QuickLinksConfigurationModuleTest {
     QuickLinksConfigurationModule parentModule = new QuickLinksConfigurationModule(parentQuiclinksFile);
     QuickLinksConfigurationModule childModule = new QuickLinksConfigurationModule(childQuickLinksFile);
 
-    childModule.resolve(parentModule, null, null);
+    childModule.resolve(parentModule, null, null, null);
 
     QuickLinks parentQuickLinks = parentModule.getModuleInfo().getQuickLinksConfigurationMap().get(QuickLinksConfigurationModule.QUICKLINKS_CONFIGURATION_KEY);
     QuickLinks childQuickLinks = childModule.getModuleInfo().getQuickLinksConfigurationMap().get(QuickLinksConfigurationModule.QUICKLINKS_CONFIGURATION_KEY);
 
     return new QuickLinks[]{parentQuickLinks, childQuickLinks};
   }
-}
+}

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/stack/ServiceModuleTest.java

@@ -1211,7 +1211,7 @@ public class ServiceModuleTest {
   }
 
   private void resolveService(ServiceModule service, ServiceModule parent) throws AmbariException {
-    service.resolve(parent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap());
+    service.resolve(parent, Collections.<String, StackModule>emptyMap(), Collections.<String, ServiceModule>emptyMap(), Collections.<String, ExtensionModule>emptyMap());
     // during runtime this would be called by the Stack module when it's resolve completed
     service.finalizeModule();
     parent.finalizeModule();

+ 28 - 4
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java

@@ -29,14 +29,19 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.CommandScriptDefinition;
 import org.apache.ambari.server.state.ComponentInfo;
@@ -58,6 +63,8 @@ public class StackManagerCommonServicesTest {
   private static StackManager stackManager;
   private static MetainfoDAO metaInfoDao;
   private static StackDAO stackDao;
+  private static ExtensionDAO extensionDao;
+  private static ExtensionLinkDAO linkDao;
   private static ActionMetadata actionMetadata;
   private static OsFamily osFamily;
 
@@ -72,17 +79,22 @@ public class StackManagerCommonServicesTest {
 
     String commonServices = ClassLoader.getSystemClassLoader().getResource(
         "common-services").getPath();
-    return createTestStackManager(stack, commonServices);
+    String extensions = ClassLoader.getSystemClassLoader().getResource(
+            "extensions").getPath();
+    return createTestStackManager(stack, commonServices, extensions);
   }
 
   public static StackManager createTestStackManager(String stackRoot,
-      String commonServicesRoot) throws Exception {
+      String commonServicesRoot, String extensionRoot) throws Exception {
     // todo: dao , actionMetaData expectations
     metaInfoDao = createNiceMock(MetainfoDAO.class);
     stackDao = createNiceMock(StackDAO.class);
+    extensionDao = createNiceMock(ExtensionDAO.class);
+    linkDao = createNiceMock(ExtensionLinkDAO.class);
     actionMetadata = createNiceMock(ActionMetadata.class);
     Configuration config = createNiceMock(Configuration.class);
     StackEntity stackEntity = createNiceMock(StackEntity.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
 
     expect(config.getSharedResourcesDirPath()).andReturn(
         ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
@@ -91,13 +103,25 @@ public class StackManagerCommonServicesTest {
         stackDao.find(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
 
-    replay(config, stackDao);
+
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(config, stackDao, extensionDao, linkDao);
+
     osFamily = new OsFamily(config);
 
     replay(metaInfoDao, actionMetadata);
 
     StackManager stackManager = new StackManager(new File(stackRoot), new File(
-        commonServicesRoot), osFamily, false, metaInfoDao, actionMetadata, stackDao);
+        commonServicesRoot), new File(extensionRoot), osFamily, true, metaInfoDao,
+        actionMetadata, stackDao, extensionDao, linkDao);
 
     EasyMock.verify( config, stackDao );
 

+ 131 - 0
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.stack;
+
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
+import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+/**
+ * StackManager extension unit tests.
+ */
+public class StackManagerExtensionTest  {
+
+  @Test
+  public void testExtensions() throws Exception {
+    MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
+    StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
+    ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
+    OsFamily osFamily = createNiceMock(OsFamily.class);
+    StackEntity stackEntity = createNiceMock(StackEntity.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
+    ExtensionLinkEntity linkEntity = createNiceMock(ExtensionLinkEntity.class);
+    List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
+    list.add(linkEntity);
+
+    expect(
+        stackDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
+
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    expect(
+        linkEntity.getExtension()).andReturn(extensionEntity).atLeastOnce();
+
+    expect(
+        extensionEntity.getExtensionName()).andReturn("EXT").atLeastOnce();
+
+    expect(
+        extensionEntity.getExtensionVersion()).andReturn("0.2").atLeastOnce();
+
+    replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao, extensionEntity, linkEntity);
+
+    String stacks = ClassLoader.getSystemClassLoader().getResource("stacks_with_extensions").getPath();
+    String common = ClassLoader.getSystemClassLoader().getResource("common-services").getPath();
+    String extensions = ClassLoader.getSystemClassLoader().getResource("extensions").getPath();
+
+    StackManager stackManager = new StackManager(new File(stacks),
+        new File(common), new File(extensions), osFamily, false,
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+
+    ExtensionInfo extension = stackManager.getExtension("EXT", "0.1");
+    assertNull("EXT 0.1's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
+    assertNotNull(extension.getService("OOZIE2"));
+    ServiceInfo oozie = extension.getService("OOZIE2");
+    assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
+    assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
+    assertEquals(oozie.getVersion(), "3.2.0");
+
+    extension = stackManager.getExtension("EXT", "0.2");
+    assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
+    assertEquals("EXT 0.2's parent: " + extension.getParentExtensionVersion(), "0.1", extension.getParentExtensionVersion());
+    assertNotNull(extension.getService("OOZIE2"));
+    oozie = extension.getService("OOZIE2");
+    assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
+    assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    StackInfo stack = stackManager.getStack("HDP", "0.2");
+    assertNotNull(stack.getService("OOZIE2"));
+    oozie = stack.getService("OOZIE2");
+    assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
+    assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.2");
+  }
+
+}

+ 36 - 11
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java

@@ -28,11 +28,16 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.OsFamily;
@@ -48,6 +53,8 @@ public class StackManagerMiscTest  {
   public void testCycleDetection() throws Exception {
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     OsFamily osFamily = createNiceMock(OsFamily.class);
     StackEntity stackEntity = createNiceMock(StackEntity.class);
@@ -56,13 +63,18 @@ public class StackManagerMiscTest  {
         stackDao.find(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
 
-    replay(actionMetadata, stackDao, metaInfoDao, osFamily);
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(actionMetadata, stackDao, extensionDao, linkDao, metaInfoDao, osFamily);
 
     try {
       String stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
 
-      StackManager stackManager = new StackManager(new File(stacksCycle1),
-          null, osFamily, false, metaInfoDao, actionMetadata, stackDao);
+      StackManager stackManager = new StackManager(new File(stacksCycle1), null, null, osFamily, false,
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -74,7 +86,7 @@ public class StackManagerMiscTest  {
           "stacks_with_cycle2").getPath();
 
       StackManager stackManager = new StackManager(new File(stacksCycle2),
-          null, osFamily, true, metaInfoDao, actionMetadata, stackDao);
+          null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -91,6 +103,8 @@ public class StackManagerMiscTest  {
   public void testGetServiceInfoFromSingleStack() throws Exception {
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     OsFamily  osFamily = createNiceMock(OsFamily.class);
     StackEntity stackEntity = createNiceMock(StackEntity.class);
@@ -102,14 +116,18 @@ public class StackManagerMiscTest  {
         stackDao.find(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
 
-    replay(metaInfoDao, stackDao, actionMetadata, osFamily);
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
     String singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
 
     StackManager stackManager = new StackManager(new File(singleStack.replace(
-        StackManager.PATH_DELIMITER, File.separator)),
-        null, osFamily, false, metaInfoDao, actionMetadata, stackDao);
-
+        StackManager.PATH_DELIMITER, File.separator)), null, null, osFamily, false, metaInfoDao,
+        actionMetadata, stackDao, extensionDao, linkDao);
 
     Collection<StackInfo> stacks = stackManager.getStacks();
     assertEquals(1, stacks.size());
@@ -126,6 +144,8 @@ public class StackManagerMiscTest  {
   public void testCircularDependencyForServiceUpgrade() throws Exception {
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     OsFamily osFamily = createNiceMock(OsFamily.class);
     StackEntity stackEntity = createNiceMock(StackEntity.class);
@@ -134,13 +154,18 @@ public class StackManagerMiscTest  {
         stackDao.find(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
 
-    replay(actionMetadata, stackDao, metaInfoDao, osFamily);
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
     try {
       String upgradeCycle = ClassLoader.getSystemClassLoader().getResource("stacks_with_upgrade_cycle").getPath();
 
-      StackManager stackManager = new StackManager(new File(upgradeCycle),
-          null, osFamily, false, metaInfoDao, actionMetadata, stackDao);
+      StackManager stackManager = new StackManager(new File(upgradeCycle), null, null, osFamily, false,
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
       fail("Expected exception due to cyclic service upgrade xml");
     } catch (AmbariException e) {

+ 111 - 12
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java

@@ -34,6 +34,7 @@ import java.lang.reflect.Type;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -47,8 +48,12 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.state.ClientConfigFileDefinition;
 import org.apache.ambari.server.state.CommandScriptDefinition;
 import org.apache.ambari.server.state.ComponentInfo;
@@ -60,6 +65,7 @@ import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -75,6 +81,8 @@ public class StackManagerTest {
   private static ActionMetadata actionMetadata;
   private static OsFamily osFamily;
   private static StackDAO stackDao;
+  private static ExtensionDAO extensionDao;
+  private static ExtensionLinkDAO linkDao;
 
   @BeforeClass
   public static void initStack() throws Exception{
@@ -90,18 +98,30 @@ public class StackManagerTest {
     // todo: dao , actionMetaData expectations
     metaInfoDao = createNiceMock(MetainfoDAO.class);
     stackDao = createNiceMock(StackDAO.class);
+    extensionDao = createNiceMock(ExtensionDAO.class);
+    linkDao = createNiceMock(ExtensionLinkDAO.class);
     actionMetadata = createNiceMock(ActionMetadata.class);
     Configuration config = createNiceMock(Configuration.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
 
     expect(config.getSharedResourcesDirPath()).andReturn(
         ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
 
-    replay(config, metaInfoDao, stackDao, actionMetadata);
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(new File(stackRoot), null,
-        osFamily, false, metaInfoDao, actionMetadata, stackDao);
+    StackManager stackManager = new StackManager(new File(stackRoot), null, null, osFamily, false,
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
     verify(config, metaInfoDao, stackDao, actionMetadata);
 
@@ -273,7 +293,7 @@ public class StackManagerTest {
     assertEquals("1.0", stack.getVersion());
     Collection<ServiceInfo> services = stack.getServices();
 
-    assertEquals(3, services.size());
+    assertEquals(4, services.size());
 
     // hdfs service
     assertEquals(6, stack.getService("HDFS").getComponents().size());
@@ -361,7 +381,7 @@ public class StackManagerTest {
     StackInfo baseStack = stackManager.getStack("OTHER", "1.0");
     StackInfo stack = stackManager.getStack("OTHER", "2.0");
 
-    assertEquals(4, stack.getServices().size());
+    assertEquals(5, stack.getServices().size());
 
     ServiceInfo service = stack.getService("SQOOP2");
     ServiceInfo baseSqoopService = baseStack.getService("SQOOP2");
@@ -426,6 +446,19 @@ public class StackManagerTest {
     assertEquals("env", clientConfigs.get(1).getType());
   }
 
+  @Test
+  public void testPackageInheritance() throws Exception{
+    StackInfo stack = stackManager.getStack("HDP", "2.0.7");
+    assertNotNull(stack.getService("HBASE"));
+    ServiceInfo hbase = stack.getService("HBASE");
+    assertNotNull("Package dir is " + hbase.getServicePackageFolder(), hbase.getServicePackageFolder());
+
+    stack = stackManager.getStack("HDP", "2.0.8");
+    assertNotNull(stack.getService("HBASE"));
+    hbase = stack.getService("HBASE");
+    assertNotNull("Package dir is " + hbase.getServicePackageFolder(), hbase.getServicePackageFolder());
+  }
+
   @Test
   public void testMonitoringServicePropertyInheritance() throws Exception{
     StackInfo stack = stackManager.getStack("HDP", "2.0.8");
@@ -624,6 +657,9 @@ public class StackManagerTest {
     ArrayList<String> hbaseMasterStartValues = (ArrayList<String>) generalDeps.get("HBASE_MASTER-START");
     assertTrue(hbaseMasterStartValues.get(0).equals("ZOOKEEPER_SERVER-START"));
 
+    ServiceInfo service = stack.getService("PIG");
+    assertNotNull("PIG's role command order is null", service.getRoleCommandOrder());
+
     assertTrue(optionalNoGlusterfs.containsKey("NAMENODE-STOP"));
     ArrayList<String> nameNodeStopValues = (ArrayList<String>) optionalNoGlusterfs.get("NAMENODE-STOP");
     assertTrue(nameNodeStopValues.contains("JOBTRACKER-STOP"));
@@ -633,6 +669,7 @@ public class StackManagerTest {
     ArrayList<String> customMasterStartValues = (ArrayList<String>) generalDeps.get("CUSTOM_MASTER-START");
     assertTrue(customMasterStartValues.contains("ZOOKEEPER_SERVER-START"));
     assertTrue(customMasterStartValues.contains("NAMENODE-START"));
+
   }
 
   @Test
@@ -653,21 +690,41 @@ public class StackManagerTest {
 
     File stackRoot = new File(resourcesDirectory, "stacks");
     File commonServices = new File(resourcesDirectory, "common-services");
+    File extensions = null;
+
+    try {
+         URL extensionsURL = ClassLoader.getSystemClassLoader().getResource("extensions");
+      if (extensionsURL != null)
+        extensions = new File(extensionsURL.getPath().replace("test-classes","classes"));
+    }
+    catch (Exception e) {}
 
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     Configuration config = createNiceMock(Configuration.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
 
     expect(config.getSharedResourcesDirPath()).andReturn(
             ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
 
-    replay(config, metaInfoDao, stackDao, actionMetadata);
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(stackRoot, commonServices,
-            osFamily, false, metaInfoDao, actionMetadata, stackDao);
+    StackManager stackManager = new StackManager(stackRoot, commonServices, extensions,
+            osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
     for (StackInfo stackInfo : stackManager.getStacks()) {
       for (ServiceInfo serviceInfo : stackInfo.getServices()) {
@@ -697,20 +754,41 @@ public class StackManagerTest {
 
     File stackRoot = new File(resourcesDirectory, "stacks");
     File commonServices = new File(resourcesDirectory, "common-services");
+    File extensions = null;
+
+    try {
+      URL extensionsURL = ClassLoader.getSystemClassLoader().getResource("extensions");
+      if (extensionsURL != null)
+        extensions = new File(extensionsURL.getPath().replace("test-classes","classes"));
+    }
+    catch (Exception e) {}
 
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     Configuration config = createNiceMock(Configuration.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
 
     expect(config.getSharedResourcesDirPath()).andReturn(
       ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
 
-    replay(config, metaInfoDao, stackDao, actionMetadata);
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(stackRoot, commonServices, osFamily, false, metaInfoDao, actionMetadata, stackDao);
+    StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
     String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
     String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
@@ -804,20 +882,41 @@ public class StackManagerTest {
 
     File stackRoot = new File(resourcesDirectory, "stacks");
     File commonServices = new File(resourcesDirectory, "common-services");
+    File extensions = null;
+
+    try {
+         URL extensionsURL = ClassLoader.getSystemClassLoader().getResource("extensions");
+      if (extensionsURL != null)
+        extensions = new File(extensionsURL.getPath().replace("test-classes","classes"));
+    }
+    catch (Exception e) {}
 
     MetainfoDAO metaInfoDao = createNiceMock(MetainfoDAO.class);
     StackDAO stackDao = createNiceMock(StackDAO.class);
+    ExtensionDAO extensionDao = createNiceMock(ExtensionDAO.class);
+    ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     Configuration config = createNiceMock(Configuration.class);
+    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
 
     expect(config.getSharedResourcesDirPath()).andReturn(
       ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
 
-    replay(config, metaInfoDao, stackDao, actionMetadata);
+    expect(
+        extensionDao.find(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
+
+    List<ExtensionLinkEntity> list = Collections.emptyList();
+    expect(
+        linkDao.findByStack(EasyMock.anyObject(String.class),
+            EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
+
+    replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
 
-    StackManager stackManager = new StackManager(stackRoot, commonServices, osFamily, false, metaInfoDao, actionMetadata, stackDao);
+    StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
 
     String zookeeperServerRoleCommand = Role.ZOOKEEPER_SERVER + "-" + RoleCommand.START;
     String logsearchServerRoleCommand = Role.LOGSEARCH_SERVER + "-" + RoleCommand.START;

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java

@@ -36,7 +36,7 @@ public class ThemeModuleTest {
     ThemeModule parentModule = new ThemeModule(parentThemeFile);
     ThemeModule childModule = new ThemeModule(childThemeFile);
 
-    childModule.resolve(parentModule, null, null);
+    childModule.resolve(parentModule, null, null, null);
 
     Theme childTheme = childModule.getModuleInfo().getThemeMap().get(ThemeModule.THEME_KEY);
     Theme parentTheme = parentModule.getModuleInfo().getThemeMap().get(ThemeModule.THEME_KEY);

+ 42 - 1
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java

@@ -168,6 +168,9 @@ public class UpgradeCatalog240Test {
     Connection connection = createNiceMock(Connection.class);
     Statement statement = createNiceMock(Statement.class);
     ResultSet resultSet = createNiceMock(ResultSet.class);
+
+    Capture<List<DBAccessor.DBColumnInfo>> capturedExtensionColumns = EasyMock.newCapture();
+    Capture<List<DBAccessor.DBColumnInfo>> capturedExtensionLinkColumns = EasyMock.newCapture();
     Capture<List<DBAccessor.DBColumnInfo>> capturedSettingColumns = EasyMock.newCapture();
 
     dbAccessor.addColumn(eq("adminpermission"), capture(capturedSortOrderColumnInfo));
@@ -176,7 +179,20 @@ public class UpgradeCatalog240Test {
     dbAccessor.addColumn(eq(UpgradeCatalog240.SERVICE_COMPONENT_DESIRED_STATE_TABLE),
         capture(capturedScDesiredVersionColumnInfo));
 
+    dbAccessor.createTable(eq("extension"), capture(capturedExtensionColumns), eq("extension_id"));
+    dbAccessor.addUniqueConstraint("extension", "UQ_extension", "extension_name", "extension_version");
+
+    expect(dbAccessor.getConnection()).andReturn(connection);
+    dbAccessor.createTable(eq("extensionlink"), capture(capturedExtensionLinkColumns), eq("link_id"));
+    dbAccessor.addUniqueConstraint("extensionlink", "UQ_extension_link", "stack_id", "extension_id");
+    dbAccessor.addFKConstraint("extensionlink", "FK_extensionlink_extension_id", "extension_id", "extension", 
+                               "extension_id", false);
+    dbAccessor.addFKConstraint("extensionlink", "FK_extensionlink_stack_id", "stack_id", "stack",
+                               "stack_id", false);
+
+    expect(dbAccessor.getConnection()).andReturn(connection);
     dbAccessor.createTable(eq("setting"), capture(capturedSettingColumns), eq("id"));
+
     expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
     expect(dbAccessor.getConnection()).andReturn(connection);
     expect(connection.createStatement()).andReturn(statement);
@@ -272,7 +288,7 @@ public class UpgradeCatalog240Test {
     // Test remote Cluster Tables
     Capture<List<DBAccessor.DBColumnInfo>> capturedRemoteAmbariClusterColumns = EasyMock.newCapture();
     dbAccessor.createTable(eq(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE), capture(capturedRemoteAmbariClusterColumns),anyString());
-    dbAccessor.addUniqueConstraint(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE , "unq_remote_ambari_cluster" , UpgradeCatalog240.CLUSTER_NAME);
+    dbAccessor.addUniqueConstraint(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE , "UQ_remote_ambari_cluster" , UpgradeCatalog240.CLUSTER_NAME);
     expect(dbAccessor.getConnection()).andReturn(connection);
     expect(connection.createStatement()).andReturn(statement);
 
@@ -369,6 +385,31 @@ public class UpgradeCatalog240Test {
 
     assertEquals(expectedCaptures, actualCaptures);
 
+    expectedCaptures = new HashMap<>();
+    expectedCaptures.put("extension_id", Long.class);
+    expectedCaptures.put("extension_name", String.class);
+    expectedCaptures.put("extension_version", String.class);
+
+    actualCaptures = new HashMap<>();
+    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedExtensionColumns.getValue()) {
+      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
+    }
+
+    assertEquals(expectedCaptures, actualCaptures);
+
+
+    expectedCaptures = new HashMap<>();
+    expectedCaptures.put("link_id", Long.class);
+    expectedCaptures.put("stack_id", Long.class);
+    expectedCaptures.put("extension_id", Long.class);
+
+    actualCaptures = new HashMap<>();
+    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedExtensionLinkColumns.getValue()) {
+      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
+    }
+
+    assertEquals(expectedCaptures, actualCaptures);
+
     expectedCaptures = new HashMap<>();
     expectedCaptures.put("id", Long.class);
     expectedCaptures.put("component_id", Long.class);

+ 30 - 0
ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>false</active>
+  </versions>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>0.1.*</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>

+ 245 - 0
ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/configuration/oozie2-site.xml

@@ -0,0 +1,245 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+
+<!--
+    Refer to the oozie-default.xml file for the complete list of
+    Oozie configuration properties and their default values.
+-->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+   </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+    The Oozie system ID.
+    </description>
+   </property>
+
+   <property>
+     <name>oozie.systemmode</name>
+     <value>NORMAL</value>
+     <description>
+     System mode for  Oozie at startup.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.AuthorizationService.security.enabled</name>
+     <value>true</value>
+     <description>
+     Specifies whether security (user name/admin role) is enabled or not.
+     If disabled any user can manage Oozie system and manage any job.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.older.than</name>
+     <value>30</value>
+     <description>
+     Jobs older than this value, in days, will be purged by the PurgeService.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.purge.interval</name>
+     <value>3600</value>
+     <description>
+     Interval at which the purge service will run, in seconds.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.queue.size</name>
+     <value>1000</value>
+     <description>Max callable queue size</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.threads</name>
+     <value>10</value>
+     <description>Number of threads used for executing callables</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.callable.concurrency</name>
+     <value>3</value>
+     <description>
+     Maximum concurrency for a given callable type.
+     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+     All commands that use action executors (action-start, action-end, action-kill and action-check) use
+     the action type as the callable type.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.coord.normal.default.timeout</name>
+     <value>120</value>
+     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout</description>
+   </property>
+
+   <property>
+     <name>oozie.db.schema.name</name>
+     <value>oozie</value>
+     <description>
+      Oozie DataBase Name
+     </description>
+   </property>
+
+    <property>
+      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+      <value> </value>
+      <description>
+      Whitelisted job tracker for Oozie service.
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.authentication.type</name>
+      <value>simple</value>
+      <description>
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+      <value> </value>
+      <description>
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.service.WorkflowAppService.system.libpath</name>
+      <value>/user/${user.name}/share/lib</value>
+      <description>
+      System library path to use for workflow applications.
+      This path is added to workflow application if their job properties sets
+      the property 'oozie.use.system.libpath' to true.
+      </description>
+    </property>
+
+    <property>
+      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+      <value>false</value>
+      <description>
+      If set to true, submissions of MapReduce and Pig jobs will include
+      automatically the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+      </description>
+    </property>
+    <property>
+      <name>oozie.authentication.kerberos.name.rules</name>
+      <value>
+
+
+
+
+
+        </value>
+      <description>The mapping from kerberos principal names to local OS user names.</description>
+    </property>
+    <property>
+      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+      <value>*=/etc/hadoop/conf</value>
+      <description>
+          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+          the relevant Hadoop *-site.xml files. If the path is relative is looked within
+          the Oozie configuration directory; though the path can be absolute (i.e. to point
+          to Hadoop client conf/ directories in the local filesystem.
+      </description>
+    </property>
+    <property>
+        <name>oozie.service.ActionService.executor.ext.classes</name>
+        <value>
+            org.apache.oozie.action.email.EmailActionExecutor,
+            org.apache.oozie.action.hadoop.HiveActionExecutor,
+            org.apache.oozie.action.hadoop.ShellActionExecutor,
+            org.apache.oozie.action.hadoop.SqoopActionExecutor,
+            org.apache.oozie.action.hadoop.DistcpActionExecutor
+        </value>
+    </property>
+
+    <property>
+        <name>oozie.service.SchemaService.wf.ext.schemas</name>
+        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,hive-action-0.3.xsd</value>
+    </property>
+    <property>
+        <name>oozie.service.JPAService.create.db.schema</name>
+        <value>false</value>
+        <description>
+            Creates Oozie DB.
+
+            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.driver</name>
+        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+        <description>
+            JDBC driver class.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.url</name>
+        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+        <description>
+            JDBC URL.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.username</name>
+        <value>sa</value>
+        <description>
+            DB user name.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.password</name>
+        <value> </value>
+        <description>
+            DB user password.
+
+            IMPORTANT: if password is empty leave a 1 space string, the service trims the value,
+                       if empty Configuration assumes it is NULL.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.pool.max.active.conn</name>
+        <value>10</value>
+        <description>
+             Max number of connections.
+        </description>
+    </property>
+</configuration>

+ 110 - 0
ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/metainfo.xml

@@ -0,0 +1,110 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE2</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+      <version>3.2.0</version>
+
+      <components>
+        <component>
+          <name>OOZIE2_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie2.noarch</name>
+            </package>
+            <package>
+              <name>oozie2-client.noarch</name>
+            </package>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie2-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

+ 20 - 0
ambari-server/src/test/resources/extensions/EXT/0.1/services/OOZIE2/package/dummy-script.py

@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

+ 26 - 0
ambari-server/src/test/resources/extensions/EXT/0.1/services/PIG2/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG2</name>
+      <extends>common-services/PIG/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 31 - 0
ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml

@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>0.1</extends>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>0.2.*</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>

+ 110 - 0
ambari-server/src/test/resources/extensions/EXT/0.2/services/OOZIE2/metainfo.xml

@@ -0,0 +1,110 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE2</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+      <version>4.0.0</version>
+
+      <components>
+        <component>
+          <name>OOZIE2_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie2.noarch</name>
+            </package>
+            <package>
+              <name>oozie2-client.noarch</name>
+            </package>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie2-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

+ 30 - 0
ambari-server/src/test/resources/stacks/OTHER/1.0/services/PIG2/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG2</name>
+      <displayName>Pig</displayName>
+      <comment>Extended PIG</comment>
+      <version>Extended Version</version>
+      <extends>HDP/2.1.1/PIG</extends>
+    </service>
+  </services>
+
+</metainfo>

+ 22 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/metainfo.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.0</upgrade>
+    </versions>
+</metainfo>

+ 57 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/repos/repoinfo.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="centos6, redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5, redhat5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

+ 46 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/HDFS/metainfo.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <components>
+        <component>
+          <name>DATANODE1</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE2</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+    </service>
+  </services>
+</metainfo>

+ 23 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/MAPREDUCE/metainfo.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.1/services/PIG/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <extends>common-services/PIG/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 22 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/metainfo.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.1</upgrade>
+    </versions>
+</metainfo>

+ 57 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/repos/repoinfo.xml

@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HBASE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

+ 145 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/global.xml

@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

+ 223 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hadoop-env.xml

@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on users and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

+ 137 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hbase-site.xml

@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

+ 199 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hdfs-log4j.xml

@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>

+ 396 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/configuration/hdfs-site.xml

@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system.  Either the
+literal string "local" or a host:port for HDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+</configuration>

+ 30 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/metainfo.xml

@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

+ 20 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HDFS/package/dummy-script.py

@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

+ 26 - 0
ambari-server/src/test/resources/stacks_with_extensions/HDP/0.2/services/HIVE/metainfo.xml

@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

Beberapa file tidak ditampilkan karena terlalu banyak file yang berubah dalam diff ini