
YARN-1306. Clean up hadoop-sls sample-conf according to YARN-1228 (Wei Yan via Sandy Ryza)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1536982 13f79535-47bb-0310-9956-ffa450edef68
Sanford Ryza 11 years ago
parent
commit
87adffe877

+ 0 - 7
hadoop-tools/hadoop-sls/src/main/sample-conf/capacity-scheduler.xml

@@ -57,11 +57,4 @@
     <name>yarn.scheduler.capacity.root.sls_queue_3.maximum-capacity</name>
     <value>100</value>
   </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>1000</value>
-    <description>Maximum number of applications in the system which 
-    can be concurrently active both running and pending</description>
-  </property>
 </configuration>

+ 0 - 50
hadoop-tools/hadoop-sls/src/main/sample-conf/fair-scheduler-allocation.xml

@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!--
-  This file contains pool and user allocations for the Fair Scheduler.
-  Its format is explained in the Fair Scheduler documentation at
-  http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html
-  The documentation also includes a sample config file.
--->
-
-<allocations>
-  <user name="jenkins">
-    <!-- Limit on running jobs for the user across all pools. If more
-      jobs than this are submitted, only the first <maxRunningJobs> will
-      be scheduled at any given time. Defaults to infinity or the
-      userMaxJobsDefault value set below. -->
-    <maxRunningJobs>1000</maxRunningJobs>
-  </user>
-  <userMaxAppsDefault>1000</userMaxAppsDefault>
-  <queue name="sls_queue_1">
-    <minResources>1024 mb, 1 vcores</minResources>
-    <schedulingMode>fair</schedulingMode>
-    <weight>0.25</weight>
-    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
-  </queue>
-  <queue name="sls_queue_2">
-    <minResources>1024 mb, 1 vcores</minResources>
-    <schedulingMode>fair</schedulingMode>
-    <weight>0.25</weight>
-    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
-  </queue>
-  <queue name="sls_queue_3">
-    <minResources>1024 mb, 1 vcores</minResources>
-    <weight>0.5</weight>
-    <schedulingMode>fair</schedulingMode>
-    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
-  </queue>
-</allocations>

+ 28 - 25
hadoop-tools/hadoop-sls/src/main/sample-conf/fair-scheduler.xml

@@ -20,28 +20,31 @@
   The documentation also includes a sample config file.
 -->
 
-<configuration>
-  <property>
-    <description>Absolute path to allocation file. An allocation file is an XML
-    manifest describing queues and their properties, in addition to certain
-    policy defaults. This file must be in XML format as described in
-    http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html.
-    </description>
-    <name>yarn.scheduler.fair.allocation.file</name>
-    <value>fair-scheduler-allocation.xml</value>
-  </property>
-
-  <property>
-    <description>Whether to use preemption. Note that preemption is experimental
-    in the current version. Defaults to false.</description>
-    <name>yarn.scheduler.fair.preemption</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <description>Whether to allow multiple container assignments in one
-    heartbeat. Defaults to false.</description>
-    <name>yarn.scheduler.fair.assignmultiple</name>
-    <value>true</value>
-  </property>
-</configuration>
+<allocations>
+  <user name="jenkins">
+    <!-- Limit on running jobs for the user across all pools. If more
+      jobs than this are submitted, only the first <maxRunningJobs> will
+      be scheduled at any given time. Defaults to infinity or the
+      userMaxJobsDefault value set below. -->
+    <maxRunningJobs>1000</maxRunningJobs>
+  </user>
+  <userMaxAppsDefault>1000</userMaxAppsDefault>
+  <queue name="sls_queue_1">
+    <minResources>1024 mb, 1 vcores</minResources>
+    <schedulingMode>fair</schedulingMode>
+    <weight>0.25</weight>
+    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
+  </queue>
+  <queue name="sls_queue_2">
+    <minResources>1024 mb, 1 vcores</minResources>
+    <schedulingMode>fair</schedulingMode>
+    <weight>0.25</weight>
+    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
+  </queue>
+  <queue name="sls_queue_3">
+    <minResources>1024 mb, 1 vcores</minResources>
+    <weight>0.5</weight>
+    <schedulingMode>fair</schedulingMode>
+    <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
+  </queue>
+</allocations>
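
Note: following YARN-1228, the Fair Scheduler loads queue allocations from the file named by yarn.scheduler.fair.allocation.file (defaulting to fair-scheduler.xml on the classpath), so the sample allocations now live directly in fair-scheduler.xml and the separate fair-scheduler-allocation.xml is dropped. A minimal sketch of how yarn-site.xml could name this file explicitly (optional here, since the default already matches):

  <!-- Illustrative only: the default allocation file name is already fair-scheduler.xml -->
  <property>
    <name>yarn.scheduler.fair.allocation.file</name>
    <value>fair-scheduler.xml</value>
  </property>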

+ 20 - 0
hadoop-tools/hadoop-sls/src/main/sample-conf/yarn-site.xml

@@ -57,4 +57,24 @@
     <value>false</value>
   </property>
 
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>1000</value>
+    <description>Maximum number of applications in the system which
+      can be concurrently active both running and pending</description>
+  </property>
+
+  <property>
+    <description>Whether to use preemption. Note that preemption is experimental
+      in the current version. Defaults to false.</description>
+    <name>yarn.scheduler.fair.preemption</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>Whether to allow multiple container assignments in one
+      heartbeat. Defaults to false.</description>
+    <name>yarn.scheduler.fair.assignmultiple</name>
+    <value>true</value>
+  </property>
 </configuration>
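
With this change the scheduler-level switches (Capacity Scheduler maximum-applications, Fair Scheduler preemption and assignmultiple) sit in yarn-site.xml alongside the rest of the SLS sample configuration. As an illustrative sketch, a plain YARN setup selects which scheduler these properties apply to via the standard yarn.resourcemanager.scheduler.class property (shown with the Fair Scheduler; the SLS itself may wrap the scheduler class, so treat this as an assumption rather than the SLS wiring):

  <!-- Hypothetical example for a plain YARN cluster, not taken from the SLS sample conf -->
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
  </property>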

+ 3 - 0
hadoop-yarn-project/CHANGES.txt

@@ -70,6 +70,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9598. Improve code coverage of RMAdminCLI (Aleksey Gorshkov and
     Andrey Klochkov via jeagles)
 
+    YARN-1306. Clean up hadoop-sls sample-conf according to YARN-1228 (Wei Yan
+    via Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES