AMBARI-4079. YARN on HDP2. Using resource management lib (Arsen Babych
via aonishuk)

Andrew Onischuk 11 years ago
parent
commit
a58a0f801a
24 changed files with 1176 additions and 515 deletions
  1. 0 20
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml
  2. 0 44
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml
  3. 0 37
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml
  4. 0 383
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json
  5. 24 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml
  6. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml
  7. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml
  8. 139 19
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml
  9. 165 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py
  10. 21 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py
  11. 55 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py
  12. 67 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py
  13. 40 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py
  14. 56 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py
  15. 84 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py
  16. 78 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py
  17. 65 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py
  18. 67 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service_check.py
  19. 128 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/yarn.py
  20. 40 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/yarn_client.py
  21. 22 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/container-executor.cfg.j2
  22. 3 6
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/mapreduce.conf.j2
  23. 119 0
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/yarn-env.sh.j2
  24. 3 6
      ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/yarn.conf.j2

+ 0 - 20
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml

@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

+ 0 - 44
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml

@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-</configuration>

+ 0 - 37
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml

@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <components>
-        <component>
-            <name>HISTORYSERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MAPREDUCE2_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-    </configuration-dependencies>
-</metainfo>

+ 0 - 383
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json

@@ -1,383 +0,0 @@
-{
-  "HISTORYSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ]
-  }
-}

+ 24 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml

@@ -61,4 +61,28 @@
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
   </property>
+
+  <!--MAPREDUCE2-->
+
+  <property>
+    <name>hs_host</name>
+    <value></value>
+    <description>History Server.</description>
+  </property>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>Mapreduce User</description>
+  </property>
+
 </configuration>

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-queue-acls.xml → ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-site.xml → ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml


+ 139 - 19
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml

@@ -15,28 +15,148 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
         <component>
-            <name>RESOURCEMANAGER</name>
-            <category>MASTER</category>
+          <name>NODEMANAGER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-nodemanager</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-proxyserver</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-resourcemanager</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
         <component>
-            <name>NODEMANAGER</name>
-            <category>SLAVE</category>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-       <component>
-            <name>YARN_CLIENT</name>
-            <category>CLIENT</category>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>core-site</config-type>
-      <config-type>yarn-site</config-type>
-      <config-type>capacity-scheduler</config-type>
-    </configuration-dependencies>
-</metainfo>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce-historyserver</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>

+ 165 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py

@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+#Return reponse for given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"# This is smoke test, no need to check CA of server
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+      
+  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+  try:
+    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = proc.communicate()
+    response = json.loads(stdout)
+    if response == None:
+      print 'There is no response for url: ' + str(url)
+      exit(1)
+    return response
+  except Exception as e:
+    print 'Error getting response for url:' + str(url), e
+    exit(1)
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAvailabilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking availability status of component', e
+    exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'Resourcemanager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validation of availability response for ' + str(component), e
+    return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAbilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking ability of component', e
+    exit(1)
+
+#Validate component-specific response that it has required resources to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There is no connected nodemanagers to resourcemanager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if connected_nodes_count == 0:
+        print 'There is no connected active nodemanagers to resourcemanager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validation of ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  component = args[0]
+  
+  address = options.address
+  ssl_enabled = (options.ssl_enabled) in 'true'
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()
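
For context only (not part of the commit): the helper above can also be exercised ad hoc by importing it and pointing it at a component's REST endpoint. The ResourceManager address below is an assumed example; the component key and paths come from the script itself.

    # Assumed example address; requires curl on the host, since the script shells out to it.
    import validateYarnComponentStatus as validator

    # Check that the ResourceManager reports the STARTED state ...
    validator.validateAvailability('rm', '/ws/v1/cluster/info', 'rm-host.example.com:8088', False)
    # ... and that at least one NodeManager is connected to it (exits non-zero otherwise).
    validator.validateAbility('rm', '/ws/v1/cluster/nodes', 'rm-host.example.com:8088', False)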

+ 21 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py

@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

+ 55 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py

@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('historyserver',
+            action='stop'
+    )
+
+if __name__ == "__main__":
+  Historyserver().execute()

+ 67 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py

@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
+    create_file_cmd = format("fs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
+    ExecuteHadoop(cleanup_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()
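
A rough sketch of what the smoke test above amounts to, assuming the ExecuteHadoop resource forwards its argument string to the hadoop CLI (an assumption about the resource management library, not something shown in this commit) and using the jar locations added in params.py below:

    # The wordcount step resolves to a command along the lines of
    #   hadoop --config /etc/hadoop/conf jar \
    #     /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar \
    #     wordcount /user/<smokeuser>/mapredsmokeinput /user/<smokeuser>/mapredsmokeoutput
    # run as the smoke user, preceded by the "fs -rm -r -f" cleanup and "fs -put /etc/passwd"
    # steps and followed by "fs -test -e" on the output directory.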

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class MapReduce2Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()

+ 56 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py

@@ -0,0 +1,56 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',
+            action='stop'
+    )
+
+if __name__ == "__main__":
+  Nodemanager().execute()

+ 84 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py

@@ -0,0 +1,84 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/hadoop/conf"
+
+mapred_user = config['configurations']['global']['mapred_user']
+yarn_user = config['configurations']['global']['yarn_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+
+java64_home = config['configurations']['global']['java64_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+yarn_heapsize = config['configurations']['global']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
+
+yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
+mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
+
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+
+user_group = config['configurations']['global']['user_group']
+limits_conf_dir = "/etc/security/limits.d"
+hadoop_conf_dir = "/etc/hadoop/conf"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"

+ 78 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

+ 65 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py

@@ -0,0 +1,65 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(
+    name,
+    action='start'):
+
+  import params
+
+  if (name == 'historyserver'):
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("su - {usr} -c '{cmd} start {name}'")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            #user=usr,      #Fix execution from user
+            not_if=no_op
+    )
+
+    Execute(no_op,
+            user=usr,
+            not_if=no_op,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {name}")
+    Execute(daemon_cmd,
+            user=usr,
+    )
+    rm_pid = format("rm -f {pid_file}")
+    Execute(rm_pid,
+            user=usr
+    )
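
For illustration (not part of the commit), here is how the start branch expands for the history server, using the defaults from params.py and the mapred_user value from global.xml in this same commit:

    # service('historyserver', action='start') builds, roughly:
    #   su - mapred -c 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && \
    #     /usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh --config /etc/hadoop/conf start historyserver'
    # guarded by a not_if check on the pid file
    #   /var/run/hadoop-mapreduce/mapred/mapred-mapred-historyserver.pid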

+ 67 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service_check.py

@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    run_yarn_check_cmd = "/usr/bin/yarn node -list"
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatus.py"
+    validateStatusFilePath = format("/tmp/{validateStatusFileName}")
+
+    validateStatusCmd = format("{validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd,
+                  user=params.smokeuser
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
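
Tying this back to validateYarnComponentStatus.py above: with security disabled, the assembled smoke command reduces to something like the following (the port is whatever yarn.resourcemanager.webapp.address resolves to, so the value shown is an assumption):

    # /tmp/validateYarnComponentStatus.py rm -p <rm_host>:8088 -s False
    # followed by "/usr/bin/yarn node -list" run as the smoke user.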

+ 128 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/yarn.py

@@ -0,0 +1,128 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def yarn():
+  import params
+
+  Directory([params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.mapred_pid_dir, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.nm_local_dirs, params.nm_log_dirs, params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            recursive=True
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(params.mapred_job_summary_log,
+       owner=params.mapred_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{config_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=Template('yarn-env.sh.j2')
+  )
+
+  File(format("{config_dir}/hadoop-env.sh"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       mode=0755,
+       content=StaticFile(format('{hadoop_conf_dir}/hadoop-env.sh'))
+  )
+
+  if params.security_enabled:
+    container_executor = format("{yarn_container_bin}/container-executor")
+    File(container_executor,
+         group=params.yarn_executor_container_group,
+         mode=6050
+    )
+
+    Execute(format("chmod 6050 {yarn_container_bin}/container-executor"))   #Fix File permission setting
+
+    File(format("{config_dir}/container-executor.cfg"),
+         group=params.user_group,
+         mode=0644,
+         content=Template('container-executor.cfg.j2')
+    )
+
+

+ 40 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/yarn_client.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+if __name__ == "__main__":
+  YarnClient().execute()

+ 22 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/container-executor.cfg.j2

@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=1000

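container-executor.cfg.j2 backs the secure LinuxContainerExecutor path: when security_enabled is set, yarn.py gives the container-executor binary mode 6050 and renders this file, which pins the NodeManager directories, the executor group, the banned system users and the minimum UID allowed to launch containers. To preview the rendered output, the template can be rendered locally with the jinja2 package; the directory paths and group below are example values, not Ambari defaults:

# Sketch: render the template locally with example values to see the result.
# Requires the jinja2 package; paths and group are illustrative assumptions.
from jinja2 import Template

TEMPLATE = """\
yarn.nodemanager.local-dirs={{nm_local_dirs}}
yarn.nodemanager.log-dirs={{nm_log_dirs}}
yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
banned.users=hdfs,yarn,mapred,bin
min.user.id=1000
"""

print(Template(TEMPLATE).render(
    nm_local_dirs="/hadoop/yarn/local",
    nm_log_dirs="/hadoop/yarn/log",
    yarn_executor_container_group="hadoop",
))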
+ 3 - 6
ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/container-executor.cfg → ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/mapreduce.conf.j2

@@ -1,4 +1,3 @@
-#
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -13,8 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hfds,bin,0
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536

+ 119 - 0
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/yarn-env.sh.j2

@@ -0,0 +1,119 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN-specific heap sizes, use this parameter and set it
+# appropriately.
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max heap size for the ResourceManager using a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set the value
+# to 1000.
+# This value will be overridden by an -Xmx setting specified in either
+# YARN_OPTS and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPSIZE
+# or JAVA_HEAP_MAX, with YARN_HEAPSIZE as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max heap size for the NodeManager using a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set the value
+# to 1000.
+# This value will be overridden by an -Xmx setting specified in either
+# YARN_OPTS and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPSIZE
+# or JAVA_HEAP_MAX, with YARN_HEAPSIZE as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames with spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

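The heap-size comments in yarn-env.sh.j2 describe a precedence chain: an explicit -Xmx in YARN_OPTS or the daemon-specific *_OPTS wins, then the daemon-specific *_HEAPSIZE export, then YARN_HEAPSIZE (which becomes JAVA_HEAP_MAX), and finally the baked-in 1000 MB default. The sketch below merely restates that documented precedence for readability; the actual resolution happens in the Hadoop launcher scripts, so treat it as documentation rather than the real code path:

# Minimal restatement of the heap-size precedence described in the comments
# above; an illustration, not the Hadoop launcher logic itself.
def effective_xmx_mb(opts_xmx=None, daemon_heapsize=None,
                     yarn_heapsize=None, default_mb=1000):
    """Return the -Xmx value (in MB) a YARN daemon would end up with."""
    if opts_xmx is not None:          # -Xmx present in YARN_OPTS / *_OPTS wins
        return opts_xmx
    if daemon_heapsize is not None:   # YARN_RESOURCEMANAGER/NODEMANAGER_HEAPSIZE
        return daemon_heapsize
    if yarn_heapsize is not None:     # YARN_HEAPSIZE -> JAVA_HEAP_MAX
        return yarn_heapsize
    return default_mb                 # baked-in -Xmx1000m default

print(effective_xmx_mb(daemon_heapsize=1024))  # -> 1024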
+ 3 - 6
ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/container-executor.cfg → ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/templates/yarn.conf.j2

@@ -1,4 +1,3 @@
-#
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -13,8 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hfds,bin,0
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536
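mapreduce.conf.j2 above and yarn.conf.j2 here are pam_limits fragments dropped into limits_conf_dir (commonly /etc/security/limits.d, though the actual value comes from params.py) that raise the open-file (nofile) and process-count (nproc) limits for the mapred and yarn users; the '-' domain type sets both the soft and the hard limit. A quick way to confirm the limits took effect in a fresh login session for either user is to query them from Python's resource module (Linux-specific sketch):

# Quick check, run as the yarn or mapred user after a fresh login session,
# that the limits written by these templates apply to the current process.
import resource

nofile_soft, nofile_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
nproc_soft, nproc_hard = resource.getrlimit(resource.RLIMIT_NPROC)

print("nofile soft=%d hard=%d (expected 32768)" % (nofile_soft, nofile_hard))
print("nproc  soft=%d hard=%d (expected 65536)" % (nproc_soft, nproc_hard))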