
AMBARI-4546. Add ATS (Application Timeline Server) to the Baikal Stack and also start/stop/configure. (vbrodetskyi)

Vitaly Brodetskyi committed 11 years ago
parent
commit
1617c73570
16 changed files with 1446 additions and 17 deletions
  1. +56 -0    ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/application_timeline_server.py
  2. +3 -8     ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py
  3. +1 -0     ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
  4. +6 -8     ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
  5. +2 -1     ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py
  6. +9 -0     ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2
  7. +1 -0     ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json
  8. +131 -0   ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
  9. +20 -0    ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
  10. +93 -0   ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
  11. +52 -0   ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
  12. +386 -0  ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
  13. +97 -0   ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml
  14. +360 -0  ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
  15. +81 -0   ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
  16. +148 -0  ambari-server/src/test/python/stacks/2.1.1/YARN/test_apptimelineserver.py

+ 56 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/application_timeline_server.py

@@ -0,0 +1,56 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from yarn import yarn
+from service import service
+
+class ApplicationTimelineServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    #self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver', action='start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.yarn_historyserver_pid_file)
+
+if __name__ == "__main__":
+  ApplicationTimelineServer().execute()
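
Note: Script().execute() here comes from Ambari's resource_management library, which
dispatches the lifecycle verb passed by the agent to the matching method of the class.
A minimal sketch of that dispatch pattern, with a hypothetical Demo subclass (this is
an illustration, not the actual resource_management implementation):

    import sys

    class Script(object):
        def execute(self):
            # argv[1] carries the lifecycle verb sent by the agent, e.g. "START";
            # resolve it to the lower-cased method on the subclass and call it.
            command = sys.argv[1].lower()
            getattr(self, command)(env=None)

    class Demo(Script):
        def start(self, env):
            print("starting")

    if __name__ == "__main__":
        Demo().execute()  # e.g. python demo.py START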

+ 3 - 8
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py

@@ -39,22 +39,17 @@ class Histroryserver(Script):
     import params
     env.set_params(params)
     self.configure(env) # FOR SECURITY
-    service('historyserver',
-            action='start'
-    )
+    service('historyserver', action='start', serviceName='mapreduce')
 
   def stop(self, env):
     import params
     env.set_params(params)
-
-    service('historyserver',
-            action='stop'
-    )
+    service('historyserver', action='stop', serviceName='mapreduce')
 
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    check_process_status(status_params.histroyserver_pid_file)
+    check_process_status(status_params.mapred_historyserver_pid_file)
 
 if __name__ == "__main__":
   Histroryserver().execute()

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py

@@ -49,6 +49,7 @@ hadoop_yarn_home = '/usr/lib/hadoop-yarn'
 yarn_heapsize = config['configurations']['global']['yarn_heapsize']
 resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
 nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/global/apptimelineserver_heapsize", 1024)
 
 yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
 yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
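
The default() helper used above falls back to 1024 MB when the stack's global
configuration does not define apptimelineserver_heapsize. A rough sketch of that
lookup over a nested-dict config (an assumption for illustration, not the real
resource_management code):

    config = {'configurations': {'global': {}}}  # heapsize deliberately absent

    def default(path, fallback):
        # Walk the '/'-separated path through the nested config dict,
        # returning the fallback as soon as a key is missing.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    print(default("/configurations/global/apptimelineserver_heapsize", 1024))  # -> 1024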

+ 6 - 8
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py

@@ -23,25 +23,23 @@ Ambari Agent
 from resource_management import *
 
 
-def service(
-    name,
-    action='start'):
+def service(componentName, action='start', serviceName='yarn'):
 
   import params
 
-  if (name == 'historyserver'):
+  if (serviceName == 'mapreduce' and componentName == 'historyserver'):
     daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
     usr = params.mapred_user
   else:
     daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
     usr = params.yarn_user
 
   cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
 
   if action == 'start':
-    daemon_cmd = format("{cmd} start {name}")
+    daemon_cmd = format("{cmd} start {componentName}")
     no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
     Execute(daemon_cmd,
             user=usr,
@@ -55,7 +53,7 @@ def service(
     )
 
   elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {name}")
+    daemon_cmd = format("{cmd} stop {componentName}")
     Execute(daemon_cmd,
             user=usr,
     )
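
With the widened signature, callers select the daemon family via serviceName: the
MapReduce JobHistoryServer goes through mr-jobhistory-daemon.sh as the mapred user,
while the new timeline server (which reuses the historyserver component name) falls
through to yarn-daemon.sh as the yarn user. The two call shapes from this commit:

    # historyserver.py: MapReduce JobHistoryServer
    service('historyserver', action='start', serviceName='mapreduce')

    # application_timeline_server.py: serviceName defaults to 'yarn'
    service('historyserver', action='start')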

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py

@@ -31,4 +31,5 @@ mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
 
 resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
 nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-histroyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
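
check_process_status(), called from the status() methods above, fails unless the pid
file exists and names a live process. A rough sketch of that check (the actual
resource_management implementation differs in details):

    import os

    class ComponentIsNotRunning(Exception):
        pass

    def check_process_status(pid_file):
        # A missing pid file or a dead pid both mean the component is down.
        if not os.path.isfile(pid_file):
            raise ComponentIsNotRunning()
        with open(pid_file) as f:
            pid = int(f.read().strip())
        try:
            os.kill(pid, 0)  # signal 0 checks process existence only
        except OSError:
            raise ComponentIsNotRunning()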

+ 9 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2

@@ -79,6 +79,15 @@ export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
 # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
 export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
 
+# Specify the max heapsize for the HistoryServer using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1024m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
 # Specify the JVM options to be used when starting the NodeManager.
 # These options will be appended to the options specified as YARN_OPTS
 # and therefore may override any similar flags set in YARN_OPTS

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json

@@ -75,6 +75,7 @@
     "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
     "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
     "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
     "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
     "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],

+ 131 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml

@@ -0,0 +1,131 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently
+      running applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues' capacities should add up to their parent queue's
+      capacity or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue, i.e. change sub-queue
+      allocations.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
+    <value>50</value>
+    <description>
+      No description
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By
+      default it is set to approximately the number of nodes in one rack, which is 40.
+    </description>
+  </property>
+
+
+</configuration>

+ 20 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml

@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+</configuration>

+ 93 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml

@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>rm_host</name>
+    <value></value>
+    <description>ResourceManager.</description>
+  </property>
+  <property>
+    <name>nm_hosts</name>
+    <value></value>
+    <description>List of NodeManager Hosts.</description>
+  </property>
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+  </property>
+
+  <!--MAPREDUCE2-->
+
+  <property>
+    <name>hs_host</name>
+    <value></value>
+    <description>History Server.</description>
+  </property>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>Mapreduce User</description>
+  </property>
+
+</configuration>

+ 52 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml

@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+    <description> Comma-separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', all users are allowed to
+      submit jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+    <description> Comma-separated list of user and group names that are allowed
+      to delete jobs or modify a job's priority for jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', all users are allowed to perform
+      this operation.
+    </description>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>

+ 386 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml

@@ -0,0 +1,386 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>200</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than .5
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+  </property>
+
+<!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.output.compress.codec</name>
+    <value></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed?
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+  </property>
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network? Uses SequenceFile compression.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+
+  <property>       
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>       
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx312m</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.env</name>
+    <value></value>
+    <description>
+      User added environment variables for the MR App Master
+      processes. Example :
+      1) A=foo  This will set the env variable A to foo
+      2) B=$B:c This will inherit the tasktracker's B env variable.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the resourcemanager; otherwise, it will be overridden. The default number is
+      set to 2, to allow at least one retry for AM.
+    </description>
+  </property>
+
+
+
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+  </property>
+
+</configuration>

+ 97 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml

@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hadoop.mapreduce.jobsummary.logger</name>
+    <value>${hadoop.root.logger}</value>
+  </property>
+  <property>
+    <name>hadoop.mapreduce.jobsummary.log.file</name>
+    <value>hadoop-mapreduce.jobsummary.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+
+  <!--Only for HDP2.0 below-->
+  <property>
+    <name>yarn.server.resourcemanager.appsummary.log.file</name>
+    <value>hadoop-mapreduce.jobsummary.log</value>
+  </property>
+  <property>
+    <name>yarn.server.resourcemanager.appsummary.logger</name>
+    <value>${hadoop.root.logger}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.File</name>
+    <value>/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.MaxFileSize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.MaxBackupIndex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
+    <value>${yarn.server.resourcemanager.appsummary.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
+    <value>false</value>
+  </property>
+  <!--Only for HDP2.0 above-->
+
+</configuration>

+ 360 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml

@@ -0,0 +1,360 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description> The address of the ResourceManager. </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>2048</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>true</value>
+    <description> Are ACLs enabled? </description>
+  </property>
+
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>*</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+  </property>
+
+  <!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and
+      cannot start with a number.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_{$contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <!--
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check_nodemanager</value>
+    <description>The health check script to run.</description>
+  </property>
+   -->
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script time out period.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: set this too small and you will spam the name node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
+      if fewer healthy local-dirs (or log-dirs) are available,
+      then new containers will not be launched on this node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.ahs.store.class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description></description>
+  </property>
+
+  <property>
+    <name>yarn.ats.store.class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbApplicationTimelineStore</value>
+    <description></description>
+  </property>
+
+  <property>
+    <name>yarn.ats.leveldb-timeline-store.path</name>
+    <value>/var/log/hadoop-yarn/ats</value>
+    <description></description>
+  </property>
+
+</configuration>
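
For a quick sanity check after deployment, the new timeline-store settings can be read
back out of the rendered yarn-site.xml with the standard library (the conf path below
matches the stack default used elsewhere in this commit):

    import xml.etree.ElementTree as ET

    root = ET.parse('/etc/hadoop/conf/yarn-site.xml').getroot()
    props = dict((p.findtext('name'), p.findtext('value'))
                 for p in root.findall('property'))
    print(props.get('yarn.ats.store.class'))
    print(props.get('yarn.ats.leveldb-timeline-store.path'))  # /var/log/hadoop-yarn/ats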

+ 81 - 0
ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml

@@ -0,0 +1,81 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>YARN</name>
+            <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+            <version>2.1.0.2.1.1</version>
+            <components>
+
+                <component>
+                    <name>APP_TIMELINE_SERVER</name>
+                    <category>MASTER</category>
+                    <commandScript>
+                        <script>scripts/application_timeline_server.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+
+
+            </components>
+
+            <osSpecifics>
+                <osSpecific>
+                    <osType>any</osType>
+                    <packages>
+                        <package>
+                            <type>rpm</type>
+                            <name>hadoop-yarn</name>
+                        </package>
+                        <package>
+                            <type>rpm</type>
+                            <name>hadoop-yarn-nodemanager</name>
+                        </package>
+                        <package>
+                            <type>rpm</type>
+                            <name>hadoop-mapreduce</name>
+                        </package>
+                        <package>
+                            <type>rpm</type>
+                            <name>hadoop-yarn-proxyserver</name>
+                        </package>
+                        <package>
+                            <type>rpm</type>
+                            <name>hadoop-yarn-resourcemanager</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+
+            <configuration-dependencies>
+                <config-type>yarn-site</config-type>
+                <config-type>capacity-scheduler</config-type>
+                <config-type>core-site</config-type>
+                <config-type>global</config-type>
+                <config-type>mapred-site</config-type>
+                <config-type>mapred-queue-acls</config-type>
+                <config-type>yarn-log4j</config-type>
+            </configuration-dependencies>
+        </service>
+
+    </services>
+</metainfo>

+ 148 - 0
ambari-server/src/test/python/stacks/2.1.1/YARN/test_apptimelineserver.py

@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestAppTimelineServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+                       classname = "ApplicationTimelineServer",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+                       classname = "ApplicationTimelineServer",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start historyserver',
+                              not_if = 'ls /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid` >/dev/null 2>&1',
+                              user = 'yarn')
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid` >/dev/null 2>&1',
+                              initial_wait = 5,
+                              not_if = 'ls /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid` >/dev/null 2>&1',
+                              user = 'yarn')
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/YARN/package/scripts/application_timeline_server.py",
+                       classname = "ApplicationTimelineServer",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop historyserver',
+                              user = 'yarn')
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop-yarn/yarn/yarn-yarn-historyserver.pid',
+                              user = 'yarn')
+    self.assertNoMoreResources()
+
+
+
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              owner = 'yarn',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              owner = 'yarn',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+                              owner = 'yarn',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0644,
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              )
+    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              mode = 0644,
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['mapred-site'],
+                              )
+    self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              mode = 0644,
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['yarn-site'],
+                              )
+    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              mode = 0644,
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
+                              )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/var/log/hadoop-mapreduce/mapred/hadoop-mapreduce.jobsummary.log',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
+                              content = Template('yarn.conf.j2'),
+                              mode = 0644,
+                              )
+    self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
+                              content = Template('mapreduce.conf.j2'),
+                              mode = 0644,
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
+                              content = Template('yarn-env.sh.j2'),
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              mode = 0755,
+                              )