
AMBARI-8878: Common Services: Refactor HDP-2.0.6 YARN service (Jayush Luniya)

Jayush Luniya 10 years ago
parent
commit
53c391541f
51 changed files with 623 additions and 1575 deletions
  1. +2 -1      ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
  2. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/alerts.json
  3. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
  4. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
  5. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/capacity-scheduler.xml
  6. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml
  7. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-log4j.xml
  8. +0 -0      ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
  9. +242 -0    ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
  10. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json
  11. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py
  12. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/files/validateYarnComponentStatus.py
  13. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/__init__.py
  14. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
  15. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
  16. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py
  17. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
  18. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
  19. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py
  20. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py
  21. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
  22. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
  23. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
  24. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/status_params.py
  25. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
  26. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
  27. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/container-executor.cfg.j2
  28. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/exclude_hosts_list.j2
  29. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/mapreduce.conf.j2
  30. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/taskcontroller.cfg.j2
  31. +0 -0     ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/yarn.conf.j2
  32. +2 -213   ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml
  33. +0 -131   ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/capacity-scheduler.xml
  34. +0 -150   ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/yarn-env.xml
  35. +0 -319   ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/yarn-site.xml
  36. +0 -1     ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/metainfo.xml
  37. +46 -0    ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-env.xml
  38. +3 -223   ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
  39. +0 -114   ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
  40. +45 -0    ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-env.xml
  41. +12 -149  ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
  42. +2 -166   ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
  43. +58 -23   ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
  44. +15 -6    ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
  45. +10 -4    ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
  46. +73 -28   ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
  47. +60 -26   ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
  48. +19 -8    ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
  49. +10 -4    ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
  50. +22 -9    ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
  51. +2 -0     ambari-web/app/data/HDP2/site_properties.js

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java

@@ -249,7 +249,8 @@ public class StackManager {
          ServiceMetainfoXml metaInfoXml = serviceDirectory.getMetaInfoFile();
          for (ServiceInfo serviceInfo : metaInfoXml.getServices()) {
            ServiceModule serviceModule = new ServiceModule(stackContext, serviceInfo, serviceDirectory, true);
-            String commonServiceKey = serviceName + StackManager.PATH_DELIMITER + serviceVersion;
+
+            String commonServiceKey = serviceInfo.getName() + StackManager.PATH_DELIMITER + serviceInfo.getVersion();
            commonServiceModules.put(commonServiceKey, serviceModule);
          }
        }
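
Why the one-line fix matters: a single common-services metainfo.xml can declare several services (the new YARN/2.1.0.2.0/metainfo.xml below declares both YARN and MAPREDUCE2), so a key built from the enclosing service directory's name and version is identical for every service in the file, and later put() calls silently overwrite earlier ones. A minimal self-contained sketch of the before/after keying, using stand-in types rather than Ambari's actual classes and assuming PATH_DELIMITER is "/":

    // Sketch only: ServiceInfo here is a stand-in record, not Ambari's class.
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class CommonServiceKeyDemo {
      static final String PATH_DELIMITER = "/"; // assumed value of StackManager.PATH_DELIMITER

      record ServiceInfo(String name, String version) {}

      public static void main(String[] args) {
        // Both services are parsed out of common-services/YARN/2.1.0.2.0/metainfo.xml.
        List<ServiceInfo> services = List.of(
            new ServiceInfo("YARN", "2.1.0.2.0"),
            new ServiceInfo("MAPREDUCE2", "2.1.0.2.0.6.0"));

        // Old keying: derived from the service directory, so both entries collide.
        Map<String, ServiceInfo> before = new LinkedHashMap<>();
        for (ServiceInfo s : services) {
          before.put("YARN" + PATH_DELIMITER + "2.1.0.2.0", s);
        }
        System.out.println(before.keySet()); // [YARN/2.1.0.2.0] - MAPREDUCE2 was lost

        // Fixed keying: derived from each parsed service's own name and version.
        Map<String, ServiceInfo> after = new LinkedHashMap<>();
        for (ServiceInfo s : services) {
          after.put(s.name() + PATH_DELIMITER + s.version(), s);
        }
        System.out.println(after.keySet()); // [YARN/2.1.0.2.0, MAPREDUCE2/2.1.0.2.0.6.0]
      }
    }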

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/alerts.json → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/alerts.json


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-env.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-site.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/capacity-scheduler.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-env.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-env.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-log4j.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-log4j.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml


+ 242 - 0
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml

@@ -0,0 +1,242 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <displayName>ResourceManager</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REFRESHQUEUES</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <displayName>YARN Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>yarn-env.sh</fileName>
+              <dictionaryName>yarn-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>capacity-scheduler.xml</fileName>
+              <dictionaryName>capacity-scheduler</dictionaryName>
+            </configFile>                        
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>mapred-site.xml</fileName>
+              <dictionaryName>mapred-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>mapred-env.sh</fileName>
+              <dictionaryName>mapred-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metrics.json


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/alerts/alert_nodemanager_health.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/files/validateYarnComponentStatus.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/files/validateYarnComponentStatus.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/__init__.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/__init__.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/application_timeline_server.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapred_service_check.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager_upgrade.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager_upgrade.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/status_params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/container-executor.cfg.j2 → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/container-executor.cfg.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/exclude_hosts_list.j2 → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/exclude_hosts_list.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/mapreduce.conf.j2 → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/mapreduce.conf.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/taskcontroller.cfg.j2 → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/taskcontroller.cfg.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn.conf.j2 → ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/yarn.conf.j2


+ 2 - 213
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml

@@ -21,222 +21,11 @@
  <services>
    <service>
      <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0</version>
-      <components>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REFRESHQUEUES</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <displayName>YARN Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>yarn-site.xml</fileName>
-              <dictionaryName>yarn-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>yarn-env.sh</fileName>
-              <dictionaryName>yarn-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>capacity-scheduler.xml</fileName>
-              <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile>                        
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
+      <extends>common-services/YARN/2.1.0.2.0</extends>
    </service>
-
    <service>
      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
-          </auto-deploy>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>mapred-site.xml</fileName>
-              <dictionaryName>mapred-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>mapred-env.sh</fileName>
-              <dictionaryName>mapred-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dir>configuration-mapred</configuration-dir>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-env</config-type>
-      </configuration-dependencies>
+      <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends>
    </service>
-
  </services>
</metainfo>
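
The two <extends> tags above are the stack-side half of the refactor: instead of carrying full service definitions, the HDP-2.0.6 metainfo now only names the common-services definitions registered by the StackManager change at the top of this commit. A hypothetical sketch of how an <extends> path lines up with those registry keys (toRegistryKey is illustrative only, not Ambari's actual resolution code):

    // Hypothetical helper; Ambari's real <extends> resolution may differ.
    public class ExtendsKeyDemo {
      static final String PATH_DELIMITER = "/"; // assumed value of StackManager.PATH_DELIMITER

      // "common-services/YARN/2.1.0.2.0" -> "YARN/2.1.0.2.0",
      // i.e. the same shape as the commonServiceKey built in StackManager above.
      static String toRegistryKey(String extendsValue) {
        String[] parts = extendsValue.split(PATH_DELIMITER);
        return parts[1] + PATH_DELIMITER + parts[2];
      }

      public static void main(String[] args) {
        System.out.println(toRegistryKey("common-services/YARN/2.1.0.2.0"));
        System.out.println(toRegistryKey("common-services/MAPREDUCE2/2.1.0.2.0.6.0"));
      }
    }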

+ 0 - 131
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/capacity-scheduler.xml

@@ -1,131 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues capacity should add up to their parent queue's capacity
-      or less.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to number of nodes in the cluster, By default is setting
-      approximately number of nodes in one rack which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>
-      Default minimum queue resource limit depends on the number of users who have submitted applications.
-    </description>
-  </property>
-
-</configuration>

+ 0 - 150
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/yarn-env.xml

@@ -21,159 +21,9 @@
-->

<configuration>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <property-type>USER</property-type>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-  </property>
  <property>
    <name>apptimelineserver_heapsize</name>
    <value>1024</value>
    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
  </property>
-
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-export JAVA_HOME={{java64_home}}
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# For setting YARN specific HEAP sizes please use this
-# Parameter and set appropriately
-YARN_HEAPSIZE={{yarn_heapsize}}
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-# Specify the max Heapsize for the HistoryManager using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1024.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_HISTORYSERVER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory and file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-    </value>
-  </property>
-  
</configuration>

+ 0 - 319
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/yarn-site.xml

@@ -20,325 +20,6 @@
<!-- Put site-specific property overrides in this file. -->

<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description> The address of ResourceManager. </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description> Are acls enabled. </description>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value></value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
-      not start with numbers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_{$contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits  between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories is configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories is configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregation logs before deleting them. -1 disables.
-      Be careful set this too small and you will spam the name node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of number of disks to be healthy for the nodemanager
-      to launch new containers. This correspond to both
-      yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e.
-      If there are less number of healthy local-dirs (or log-dirs) available,
-      then new containers will not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-  </property>
-
  <property>
    <name>yarn.timeline-service.enabled</name>
    <value>true</value>

+ 0 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/metainfo.xml

@@ -21,7 +21,6 @@
  <services>
    <service>
      <name>YARN</name>
-      <displayName>YARN</displayName>
      <version>2.4.0.2.1</version>
      <components>


+ 46 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-env.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>c:\hadoop\logs\hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>c:\hadoop\run\hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- mapred-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for mapred-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>

+ 3 - 223
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml

@@ -2,238 +2,18 @@
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-  <!-- MR AM properties -->
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-  </property>
-  <property>
-    <name>mapreduce.job.hdfs-servers</name>
-    <value>${fs.defaultFS}</value>
-  </property>
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>Fraction of the number of maps in the job which should be
-      complete before reduces are scheduled for the job.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string. A value of 0 disables the timeout.
-    </description>
-  </property>
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>mapreduce.job.acl-view-job</name>
-    <value>*</value>
-  </property>
-  <!-- i/o properties -->
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>No description</description>
-  </property>
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value>0.9</value>
-    <description>No description</description>
-  </property>
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-  <!-- map tasks' properties -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>true</value>
-    <description>Should the outputs of the maps be compressed before being
-               sent across the network. Uses SequenceFile compression.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-               compressed?
-    </description>
-  </property>
-  <!-- reduce tasks' properties -->
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>The default number of parallel transfers run by reduce
-      during the copy(shuffle) phase.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.reduce.merge.inmem.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files
-      for the in-memory merge process. When we accumulate threshold number of files
-      we initiate the in-memory merge and spill to disk. A value of 0 or less than
-      0 indicates we want to DON'T have any threshold and instead depend only on
-      the ramfs's memory consumption to trigger the merge.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.70</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-  <!-- JobHistory Server -->
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mapred/history/done_intermediate</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mapred/history/done</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.https.address</name>
-    <value>localhost:19888</value>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.create-intermediate-jh-base-dir</name>
-    <value>false</value>
-  </property>
-  <!-- JobHistory Security Settings -->
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>%HADOOP_CONF_DIR%,%HADOOP_COMMON_HOME%/share/hadoop/common/*,%HADOOP_COMMON_HOME%/share/hadoop/common/lib/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/lib/*</value>
-    <description>CLASSPATH for MR applications. A comma-separated list
-      of CLASSPATH entries</description>
-  </property>
-  <property>
-    <name>mapreduce.shuffle.ssl.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>mapreduce.ssl.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.max</name>
-    <value>20000</value>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.groups.max</name>
-    <value>10000</value>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.group.name.max</name>
-    <value>1000</value>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.counter.name.max</name>
-    <value>1000</value>
-  </property>
   <property>
     <name>mapreduce.cluster.local.dir</name>
-    <value>c:\hdpdata\hadoop\local</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
+    <value>c:\hadoop\temp\local\hadoop-mapreduce</value>
   </property>

   <property>
     <name>mapred.local.dir</name>
-    <value>c:\hdpdata\hdfs\mapred\local</value>
-  </property>
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
+    <value>c:\hadoop\temp\local\hadoop-mapreduce</value>
   </property>

   <property>
     <name>mapred.child.tmp</name>
-    <value>c:\hdp\temp\hadoop</value>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
+    <value>c:\hadoop\temp\hadoop-mapred</value>
   </property>
 </configuration>

+ 0 - 114
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml

@@ -1,114 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare
-      Resources in the scheduler.
-      The default i.e. DefaultResourceCalculator only uses Memory while
-      DominantResourceCalculator uses dominant-resource to compare
-      multi-dimensional resources such as Memory, CPU etc.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default,joblauncher</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>95</value>
-    <description>Default queue target capacity.</description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>10</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to number of nodes in the cluster, By default is setting
-      approximately number of nodes in one rack which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.joblauncher.capacity</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.joblauncher.user-limit-factor</name>
-    <value>10</value>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.joblauncher.maximum-capacity</name>
-    <value>50</value>
-  </property>
-</configuration>

+ 45 - 0
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-env.xml

@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>c:\hadoop\logs\hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>c:\hadoop\run\hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <deleted>true</deleted>
+  </property>
+  <!-- yarn-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for yarn-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>

+ 12 - 149
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml

@@ -2,32 +2,7 @@
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!-- Put site-specific property overrides in this file. -->
 <configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>

-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
   <property>
     <name>yarn.nodemanager.pmem-check-enabled</name>
     <value>true</value>
@@ -37,151 +12,52 @@
     <value>true</value>
   </property>
   <!-- NodeManager -->
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
   <property>
     <name>yarn.nodemanager.webapp.address</name>
     <value>0.0.0.0:50060</value>
   </property>
+
   <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxilliary services of NodeManager</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>36000</value>
-  </property>
-  <property>
-    <description>Store class name for history store, defaulting to file system store</description>
     <name>yarn.timeline-service.generic-application-history.store-class</name>
     <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
+    <description>Store class name for history store, defaulting to file system store</description>
   </property>
   <!-- Use a directory that is set up on HDFS to store generic history -->
   <property>
+    <name>yarn.timeline-service.generic-application-history.fs-history-store.uri</name>
+    <value>/yarn/generic-history/</value>
     <description>URI pointing to the location of the FileSystem path where the history will be persisted. This must be
       supplied when using org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore as
       the value for yarn.timeline-service.generic-application-history.store-class
     </description>
-    <name>yarn.timeline-service.generic-application-history.fs-history-store.uri</name>
-    <value>/yarn/generic-history/</value>
   </property>
   <property>
-    <description>T-file compression types used to compress history data.</description>
     <name>yarn.timeline-service.generic-application-history.fs-history-store.compression-type</name>
     <value>none</value>
+    <description>T-file compression types used to compress history data.</description>
   </property>
   <property>
+    <name>yarn.timeline-service.generic-application-history.enabled</name>
+    <value>false</value>
     <description>Indicate to ResourceManager as well as clients whether
       history-service is enabled or not. If enabled, ResourceManager starts
       recording historical data that ApplicationHistory service can consume.
       Similarly, clients can redirect to the history service when applications
       finish if this is enabled.
     </description>
-    <name>yarn.timeline-service.generic-application-history.enabled</name>
-    <value>false</value>
   </property>
   <property>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
     <name>yarn.timeline-service.enabled</name>
     <value>false</value>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
     </description>
   </property>

-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-  </property>

   <property>
     <name>yarn.nodemanager.local-dirs</name>
-    <value>c:\hdpdata\hadoop\local</value>
+    <value>c:\hadoop\temp\local\hadoop-yarn</value>
   </property>

   <property>
@@ -191,24 +67,11 @@

   <property>
     <name>yarn.nodemanager.log-dirs</name>
-    <value>c:\hdpdata\hadoop\logs</value>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
+    <value>c:\hadoop\logs\hadoop-yarn</value>
   </property>

-  <property>
+   <property>
     <name>yarn.timeline-service.hostname</name>
     <value>localhost</value>
   </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-  </property>
 </configuration>

+ 2 - 166
ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml

@@ -21,166 +21,30 @@
   <services>
     <service>
       <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <extends>common-services/YARN/2.1.0.2.0</extends>
       <version>2.4.0.2.1.1.0</version>
       <components>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REFRESHQUEUES</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
         <component>
           <name>YARN_CLIENT</name>
-          <displayName>Yarn Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>yarn-site.xml</fileName>
-              <dictionaryName>yarn-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
             <configFile>
               <type>env</type>
               <fileName>yarn-env.cmd</fileName>
               <dictionaryName>yarn-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>capacity-scheduler.xml</fileName>
-              <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
     </service>

     <service>
       <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends>
       <version>2.4.0.2.1.1.0</version>
       <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
-          </auto-deploy>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
         <component>
           <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>mapred-site.xml</fileName>
-              <dictionaryName>mapred-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
             <configFile>
               <type>env</type>
               <fileName>mapred-env.cmd</fileName>
@@ -189,35 +53,7 @@
           </configFiles>
         </component>
       </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
       <configuration-dir>configuration-mapred</configuration-dir>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-env</config-type>
-      </configuration-dependencies>
     </service>

   </services>

+ 58 - 23
ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py

@@ -26,20 +26,28 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestHistoryServer(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="configure",
                        command="configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_default()
     self.assertNoMoreResources()

   def test_start_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="start",
                        command="start",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_default()

     pid_check_cmd = 'ls /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid` >/dev/null 2>&1'
@@ -57,10 +65,13 @@ class TestHistoryServer(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="stop",
                        command="stop",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config /etc/hadoop/conf stop historyserver',
                               user='mapred')
@@ -70,18 +81,24 @@

   def test_configure_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="configure",
                        command="configure",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_secured()
     self.assertNoMoreResources()

   def test_start_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="start",
                        command="start",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_secured()

@@ -100,10 +117,13 @@ class TestHistoryServer(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="stop",
                        command="stop",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config /etc/hadoop/conf stop historyserver',
                               user='mapred')
@@ -533,10 +553,13 @@ class TestHistoryServer(RMFTestCase):
     get_params_mock.return_value = security_params
     validate_security_config_mock.return_value = result_issues

-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     import status_params

@@ -564,10 +587,13 @@ class TestHistoryServer(RMFTestCase):
     cached_kinit_executor_mock.side_effect = Exception("Invalid command")

     try:
-      self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                          classname="HistoryServer",
                          classname="HistoryServer",
                          command="security_status",
                          command="security_status",
-                         config_file="secured.json")
+                         config_file="secured.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
     except:
       self.assertTrue(True)

@@ -578,10 +604,13 @@ class TestHistoryServer(RMFTestCase):
     put_structured_out_mock.reset_mock()
     get_params_mock.return_value = empty_security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal not set."})

     # Testing with not empty result_issues
@@ -592,15 +621,21 @@ class TestHistoryServer(RMFTestCase):
     validate_security_config_mock.return_value = result_issues_with_params
     get_params_mock.return_value = security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

     # Testing with security_enable = false
-    self.executeScript("2.0.6/services/YARN/package/scripts/historyserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
                        classname="HistoryServer",
                        classname="HistoryServer",
                        command="security_status",
                        command="security_status",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

+ 15 - 6
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py

@@ -26,12 +26,16 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestMapReduce2Client(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/mapreduce2_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
                        classname = "MapReduce2Client",
                        classname = "MapReduce2Client",
                        command = "configure",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )

     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -162,10 +166,12 @@ class TestMapReduce2Client(RMFTestCase):

   def test_configure_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/mapreduce2_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
                        classname = "MapReduce2Client",
                        classname = "MapReduce2Client",
                        command = "configure",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
@@ -310,10 +316,13 @@ class TestMapReduce2Client(RMFTestCase):
     self.assertNoMoreResources()

   def test_upgrade(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/mapreduce2_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
                    classname = "MapReduce2Client",
                    classname = "MapReduce2Client",
                    command = "restart",
                    command = "restart",
-                   config_file="client-upgrade.json")
+                   config_file="client-upgrade.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")


+ 10 - 4
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py

@@ -26,13 +26,17 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_service_check_default(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/mapred_service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapred_service_check.py",
                       classname="MapReduce2ServiceCheck",
                       classname="MapReduce2ServiceCheck",
                       command="service_check",
                       command="service_check",
-                      config_file="default.json"
+                      config_file="default.json",
+                      hdp_stack_version = self.STACK_VERSION,
+                      target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('ExecuteHadoop', 'fs -rm -r -f /user/ambari-qa/mapredsmokeoutput /user/ambari-qa/mapredsmokeinput',
                       try_sleep = 5,
@@ -65,10 +69,12 @@ class TestServiceCheck(RMFTestCase):

   def test_service_check_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/mapred_service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapred_service_check.py",
                       classname="MapReduce2ServiceCheck",
                       classname="MapReduce2ServiceCheck",
                       command="service_check",
                       command="service_check",
-                      config_file="secured.json"
+                      config_file="secured.json",
+                      hdp_stack_version = self.STACK_VERSION,
+                      target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;',
                       user = 'ambari-qa',

+ 73 - 28
ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py

@@ -28,20 +28,28 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestNodeManager(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="configure",
                        command="configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_default()
     self.assertNoMoreResources()

   def test_start_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="start",
                        command="start",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_default()

     pid_check_cmd = 'ls /var/run/hadoop-yarn/yarn/yarn-yarn-nodemanager.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop-yarn/yarn/yarn-yarn-nodemanager.pid` >/dev/null 2>&1'
@@ -59,10 +67,13 @@ class TestNodeManager(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="stop",
                        command="stop",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop nodemanager',
                               user='yarn')
     self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-nodemanager.pid',
@@ -71,18 +82,24 @@

   def test_configure_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="configure",
                        command="configure",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_secured()
     self.assertNoMoreResources()

   def test_start_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="start",
                        command="start",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_secured()

@@ -101,10 +118,13 @@ class TestNodeManager(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="stop",
                        command="stop",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop nodemanager',
                               user='yarn')
     self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-nodemanager.pid',
@@ -568,8 +588,13 @@

     process_mock.return_value = (0, process_output)

-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
-      classname="Nodemanager", command = "post_rolling_restart", config_file="default.json")
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
+                       classname="Nodemanager",
+                       command = "post_rolling_restart",
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertTrue(process_mock.called)
     self.assertEqual(process_mock.call_count,1)
@@ -585,8 +610,13 @@ class TestNodeManager(RMFTestCase):
     process_mock.return_value = (0, process_output)

     try:
-      self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
-        classname="Nodemanager", command = "post_rolling_restart", config_file="default.json")
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
+                         classname="Nodemanager",
+                         command = "post_rolling_restart",
+                         config_file="default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
       self.fail('Missing NodeManager should have caused a failure')
     except Fail,fail:
       self.assertTrue(process_mock.called)
@@ -603,8 +633,13 @@ class TestNodeManager(RMFTestCase):
     process_mock.return_value = (999, process_output)

     try:
-      self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
-        classname="Nodemanager", command = "post_rolling_restart", config_file="default.json")
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
+                         classname="Nodemanager",
+                         command = "post_rolling_restart",
+                         config_file="default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
       self.fail('Invalid return code should cause a failure')
     except Fail,fail:
       self.assertTrue(process_mock.called)
@@ -640,10 +675,12 @@ class TestNodeManager(RMFTestCase):
     get_params_mock.return_value = security_params
     validate_security_config_mock.return_value = result_issues

-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )

     build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
@@ -662,10 +699,12 @@ class TestNodeManager(RMFTestCase):
     cached_kinit_executor_mock.side_effect = Exception("Invalid command")

     try:
-          self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
           )
     except:
       self.assertTrue(True)
@@ -677,10 +716,12 @@ class TestNodeManager(RMFTestCase):
     put_structured_out_mock.reset_mock()
     get_params_mock.return_value = empty_security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})

@@ -693,17 +734,21 @@ class TestNodeManager(RMFTestCase):
     validate_security_config_mock.return_value = result_issues_with_params
     get_params_mock.return_value = security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

     # Testing with security_enable = false
-    self.executeScript("2.0.6/services/YARN/package/scripts/nodemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
                        classname="Nodemanager",
                        classname="Nodemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

+ 60 - 26
ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py

@@ -26,20 +26,28 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestResourceManager(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="configure",
                        command="configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_default()
     self.assertNoMoreResources()

   def test_start_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="start",
                        command="start",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_default()

@@ -58,10 +66,13 @@ class TestResourceManager(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="stop",
                        command="stop",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop resourcemanager',
                               user='yarn')
@@ -71,17 +82,23 @@ class TestResourceManager(RMFTestCase):

   def test_configure_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="configure",
                        command="configure",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
     self.assert_configure_secured()

   def test_start_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="start",
                        command="start",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_secured()

@@ -100,10 +117,13 @@ class TestResourceManager(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="stop",
                        command="stop",
-                       config_file="secured.json")
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop resourcemanager',
                               user='yarn')
@@ -113,10 +133,12 @@ class TestResourceManager(RMFTestCase):


   def test_decommission_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname = "Resourcemanager",
                        classname = "Resourcemanager",
                        command = "decommission",
                        command = "decommission",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
         owner = 'yarn',
@@ -130,10 +152,12 @@ class TestResourceManager(RMFTestCase):
     self.assertNoMoreResources()

   def test_decommission_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname = "Resourcemanager",
                        classname = "Resourcemanager",
                        command = "decommission",
                        command = "decommission",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
         owner = 'yarn',
@@ -455,10 +479,12 @@ class TestResourceManager(RMFTestCase):
     get_params_mock.return_value = security_params
     validate_security_config_mock.return_value = result_issues

-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )

     build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
@@ -477,10 +503,12 @@ class TestResourceManager(RMFTestCase):
     cached_kinit_executor_mock.side_effect = Exception("Invalid command")

     try:
-          self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
           )
     except:
       self.assertTrue(True)
@@ -492,10 +520,12 @@ class TestResourceManager(RMFTestCase):
     put_structured_out_mock.reset_mock()
     get_params_mock.return_value = empty_security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})

@@ -508,17 +538,21 @@ class TestResourceManager(RMFTestCase):
     validate_security_config_mock.return_value = result_issues_with_params
     get_params_mock.return_value = security_params

-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

     # Testing with security_enable = false
-    self.executeScript("2.0.6/services/YARN/package/scripts/resourcemanager.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
                        classname="Resourcemanager",
                        classname="Resourcemanager",
                        command="security_status",
                        command="security_status",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

+ 19 - 8
ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py

@@ -27,12 +27,16 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 class TestYarnClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/yarn_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                        classname = "YarnClient",
                        classname = "YarnClient",
                        command = "configure",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )

     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -163,10 +167,12 @@ class TestYarnClient(RMFTestCase):

   def test_configure_secured(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/yarn_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                        classname = "YarnClient",
                        classname = "YarnClient",
                        command = "configure",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
@@ -311,11 +317,13 @@ class TestYarnClient(RMFTestCase):
     self.assertNoMoreResources()

   def test_restart_client(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/yarn_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                        classname = "YarnClient",
                        classname = "YarnClient",
                        command = "restart",
                        command = "restart",
                        config_file="default.json",
                        config_file="default.json",
-                       config_overrides = { 'roleParams' : { "component_category": "CLIENT" } }
+                       config_overrides = { 'roleParams' : { "component_category": "CLIENT" } },
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )

     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -446,10 +454,13 @@ class TestYarnClient(RMFTestCase):


   def test_upgrade(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/yarn_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                    classname = "YarnClient",
                    classname = "YarnClient",
                    command = "restart",
                    command = "restart",
-                   config_file="client-upgrade.json")
+                   config_file="client-upgrade.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")
 
 

+ 10 - 4
ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py

@@ -23,13 +23,17 @@ from stacks.utils.RMFTestCase import *

 @patch("sys.executable", new = '/usr/bin/python2.6')
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_service_check_default(self):

-    self.executeScript("2.0.6/services/YARN/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                           classname="ServiceCheck",
                           classname="ServiceCheck",
                           command="service_check",
                           command="service_check",
-                          config_file="default.json"
+                          config_file="default.json",
+                          hdp_stack_version = self.STACK_VERSION,
+                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/validateYarnComponentStatus.py',
                           content = StaticFile('validateYarnComponentStatus.py'),
@@ -49,10 +53,12 @@ class TestServiceCheck(RMFTestCase):
     self.assertNoMoreResources()

   def test_service_check_secured(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                           classname="ServiceCheck",
                           classname="ServiceCheck",
                           command="service_check",
                           command="service_check",
-                          config_file="secured.json"
+                          config_file="secured.json",
+                          hdp_stack_version = self.STACK_VERSION,
+                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/validateYarnComponentStatus.py',
                           content = StaticFile('validateYarnComponentStatus.py'),

+ 22 - 9
ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py

@@ -28,22 +28,29 @@ origin_exists = os.path.exists
     side_effect=lambda *args: origin_exists(args[0])
     if args[0][-2:] == "j2" else True))
 class TestAppTimelineServer(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"

   def test_configure_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
                        classname="ApplicationTimelineServer",
                        classname="ApplicationTimelineServer",
                        command="configure",
                        command="configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_default()
     self.assertNoMoreResources()

   def test_start_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
                        classname="ApplicationTimelineServer",
                        classname="ApplicationTimelineServer",
                        command="start",
                        command="start",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assert_configure_default()

@@ -62,10 +69,13 @@ class TestAppTimelineServer(RMFTestCase):
     self.assertNoMoreResources()

   def test_stop_default(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
                        classname="ApplicationTimelineServer",
                        classname="ApplicationTimelineServer",
                        command="stop",
                        command="stop",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop timelineserver',
                               user='yarn')
@@ -208,10 +218,13 @@ class TestAppTimelineServer(RMFTestCase):


   def test_status(self):
-    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
                        classname="ApplicationTimelineServer",
                        classname="ApplicationTimelineServer",
                        command="status",
                        command="status",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )

     self.assertResourceCalled('Execute', 'mv /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid /var/run/hadoop-yarn/yarn/yarn-yarn-timelineserver.pid',
         only_if = 'test -e /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid',
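
The status test above pins down an idempotent pid-file migration: the ApplicationTimelineServer pid, historically written under the historyserver name, is renamed to the timelineserver name, with the only_if guard making the move a no-op once the legacy file is gone. The same guard pattern in plain Python, as a sketch (the script itself expresses it with the resource_management Execute resource, exactly as the assertion shows):

import os

OLD_PID = '/var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid'
NEW_PID = '/var/run/hadoop-yarn/yarn/yarn-yarn-timelineserver.pid'

def migrate_ats_pid_file(old_pid = OLD_PID, new_pid = NEW_PID):
    # only_if equivalent: skip entirely unless the legacy pid file still exists,
    # so repeated status checks do not fail on a missing source file.
    if os.path.exists(old_pid):
        os.rename(old_pid, new_pid)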

+ 2 - 0
ambari-web/app/data/HDP2/site_properties.js

@@ -56,6 +56,7 @@ module.exports =
       "displayType": "user",
       "displayType": "user",
       "isOverridable": false,
       "isOverridable": false,
       "isVisible": App.get('isHadoopWindowsStack'),
       "isVisible": App.get('isHadoopWindowsStack'),
+      "serviceName": "MISC",
       "filename": "cluster-env.xml",
       "filename": "cluster-env.xml",
       "category": "Users and Groups",
       "category": "Users and Groups",
       "belongsToService": ["HDFS"],
       "belongsToService": ["HDFS"],
@@ -70,6 +71,7 @@ module.exports =
       "displayType": "password",
       "displayType": "password",
       "isOverridable": false,
       "isOverridable": false,
       "isVisible": App.get('isHadoopWindowsStack'),
       "isVisible": App.get('isHadoopWindowsStack'),
+      "serviceName": "MISC",
       "filename": "cluster-env.xml",
       "filename": "cluster-env.xml",
       "category": "Users and Groups",
       "category": "Users and Groups",
       "belongsToService": ["HDFS"],
       "belongsToService": ["HDFS"],