Merge branch 'trunk' into HDFS-7240

Xiaoyu Yao, 7 years ago
Parent commit: 3d9a949183

+ 2 - 2
hadoop-project-dist/pom.xml

@@ -120,7 +120,7 @@
             <docletArtifact>
               <groupId>org.apache.hadoop</groupId>
               <artifactId>hadoop-annotations</artifactId>
-              <version>${project.version}</version>
+              <version>${hadoop.version}</version>
             </docletArtifact>
           </docletArtifacts>
           <useStandardDocletOptions>true</useStandardDocletOptions>
@@ -393,7 +393,7 @@
               <dependency>
                 <groupId>org.apache.hadoop</groupId>
                 <artifactId>hadoop-assemblies</artifactId>
-                <version>${project.version}</version>
+                <version>${hadoop.version}</version>
               </dependency>
             </dependencies>
             <executions>
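
Why the swap from ${project.version} to ${hadoop.version}? The root pom.xml (the last file in this commit) introduces the hadoop.version property because, as its comment notes, child modules that carry a different version than the parent cannot rely on ${project.version}: Maven re-interpolates that expression against whichever module is being built. A minimal Java sketch of the distinction, using a hypothetical child version to stand in for Maven's interpolation:

// Hedged sketch, not Maven itself: an inherited property such as
// ${hadoop.version} keeps one fixed value, while ${project.version} is
// re-resolved per module.
public class VersionPropertySketch {
  public static void main(String[] args) {
    // Value of the new root-pom property, inherited unchanged by children:
    String hadoopVersion = "3.2.0-SNAPSHOT";       // what ${hadoop.version} yields
    // A hypothetical child module on its own release line would resolve
    // ${project.version} to its own version instead:
    String childProjectVersion = "0.2.1-SNAPSHOT"; // what ${project.version} yields there
    System.out.println("hadoop-annotations via ${hadoop.version}:  " + hadoopVersion);
    System.out.println("hadoop-annotations via ${project.version}: " + childProjectVersion);
  }
}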

+ 79 - 79
hadoop-project/pom.xml

@@ -50,7 +50,7 @@
 
     <kafka.version>0.8.2.1</kafka.version>
 
-    <hadoop.assemblies.version>${project.version}</hadoop.assemblies.version>
+    <hadoop.assemblies.version>3.2.0-SNAPSHOT</hadoop.assemblies.version>
     <commons-daemon.version>1.0.13</commons-daemon.version>
 
     <test.build.dir>${project.build.directory}/test-dir</test.build.dir>
@@ -141,7 +141,7 @@
     <frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
     <!-- the version of Hadoop declared in the version resources; can be overridden
     so that Hadoop 3.x can declare itself a 2.x artifact. -->
-    <declared.hadoop.version>${project.version}</declared.hadoop.version>
+    <declared.hadoop.version>${hadoop.version}</declared.hadoop.version>
 
     <swagger-annotations-version>1.5.4</swagger-annotations-version>
     <snakeyaml.version>1.16</snakeyaml.version>
@@ -170,392 +170,392 @@
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-assemblies</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-annotations</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-modules</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>pom</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-api</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-check-invariants</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>pom</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-check-test-invariants</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>pom</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-integration-tests</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-runtime</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client-minicluster</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-auth</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-auth</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-nfs</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs-client</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs-rbf</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-app</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-app</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-api</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-client</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-core</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>pom</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-yarn-server-tests</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-registry</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-          <version>${project.version}</version>
+          <version>${hadoop.version}</version>
           <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
      <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
         <type>test-jar</type>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-server-router</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-services-core</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-client-hs</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-mapreduce-examples</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-gridmix</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-streaming</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-archives</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-archive-logs</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-distcp</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-distcp</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-datajoin</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-rumen</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-extras</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-client</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minikdc</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-openstack</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-azure</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-azure-datalake</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-aws</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-aliyun</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
 
       <dependency>
@@ -668,12 +668,12 @@
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-kms</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-kms</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
         <type>test-jar</type>
       </dependency>
 
@@ -1257,12 +1257,12 @@
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-sls</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-cloud-storage</artifactId>
-        <version>${project.version}</version>
+        <version>${hadoop.version}</version>
       </dependency>
       <dependency>
         <groupId>com.google.code.findbugs</groupId>
@@ -1661,7 +1661,7 @@
         <plugin>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-maven-plugins</artifactId>
-          <version>${project.version}</version>
+          <version>${hadoop.version}</version>
         </plugin>
       </plugins>
     </pluginManagement>
@@ -1690,14 +1690,14 @@
         <version>${maven-remote-resources-plugin.version}</version>
         <configuration>
           <resourceBundles>
-            <resourceBundle>org.apache.hadoop:hadoop-build-tools:${project.version}</resourceBundle>
+            <resourceBundle>org.apache.hadoop:hadoop-build-tools:${hadoop.version}</resourceBundle>
           </resourceBundles>
         </configuration>
         <dependencies>
           <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-build-tools</artifactId>
-            <version>${project.version}</version>
+            <version>${hadoop.version}</version>
           </dependency>
         </dependencies>
         <executions>
@@ -2008,7 +2008,7 @@
           <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>${hbase-server-artifactid}</artifactId>
-            <version>${project.version}</version>
+            <version>${hadoop.version}</version>
           </dependency>
         </dependencies>
       </dependencyManagement>
@@ -2035,7 +2035,7 @@
           <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>${hbase-server-artifactid}</artifactId>
-            <version>${project.version}</version>
+            <version>${hadoop.version}</version>
           </dependency>
         </dependencies>
       </dependencyManagement>

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -2660,7 +2660,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
       TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
       TIMELINE_SERVICE_PREFIX
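
The default collector linger period rises from 1 second to 60 seconds, presumably so the per-application collector in the NodeManager stays alive long enough to receive entities posted after the application master container finishes. A minimal sketch of how this constant pair is consumed (the surrounding reader code is assumed; the constant names are the ones in the diff above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LingerPeriodSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // With the property unset in *-site.xml, this now resolves to 60000
    // rather than 1000.
    long lingerMs = conf.getLong(
        YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
        YarnConfiguration.DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS);
    System.out.println("app-collector linger period = " + lingerMs + " ms");
  }
}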

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -2499,7 +2499,7 @@
     <description>Time period till which the application collector will be alive
      in NM, after the  application master container finishes.</description>
     <name>yarn.timeline-service.app-collector.linger-period.ms</name>
-    <value>1000</value>
+    <value>60000</value>
   </property>
 
   <property>

+ 150 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java

@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -38,18 +42,24 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.hamcrest.CoreMatchers;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
+import static org.hamcrest.MatcherAssert.assertThat;
+
 public class TestCapacitySchedulerSurgicalPreemption
     extends CapacitySchedulerPreemptionTestBase {
 
+  private static final int NUM_NM = 5;
   @Override
   @Before
   public void setUp() throws Exception {
@@ -860,6 +870,146 @@ public class TestCapacitySchedulerSurgicalPreemption
     rm1.close();
   }
 
+  private void initializeConfProperties(CapacitySchedulerConfiguration conf)
+      throws IOException {
+
+    conf.setQueues("root", new String[] {"A", "B"});
+    conf.setCapacity("root.A", 50);
+    conf.setCapacity("root.B", 50);
+    conf.setQueuePriority("root.A", 1);
+    conf.setQueuePriority("root.B", 2);
+
+    conf.set(PREFIX + "root.ordering-policy", "priority-utilization");
+    conf.set(PREFIX + "ordering-policy.priority-utilization.underutilized-preemption.enabled", "true");
+    conf.set(PREFIX + "ordering-policy.priority-utilization.underutilized-preemption.allow-move-reservation", "false");
+    conf.set(PREFIX + "ordering-policy.priority-utilization.underutilized-preemption.reserved-container-delay-ms", "0");
+    conf.set(PREFIX + "root.accessible-node-labels.x.capacity", "100");
+
+    // Setup queue access to node labels
+    conf.set(PREFIX + "root.A.accessible-node-labels", "x");
+    conf.set(PREFIX + "root.B.accessible-node-labels", "x");
+    conf.set(PREFIX + "root.A.default-node-label-expression", "x");
+    conf.set(PREFIX + "root.B.default-node-label-expression", "x");
+    conf.set(PREFIX + "root.A.accessible-node-labels.x.capacity", "50");
+    conf.set(PREFIX + "root.B.accessible-node-labels.x.capacity", "50");
+    conf.set(PREFIX + "root.A.user-limit-factor", "100");
+    conf.set(PREFIX + "root.B.user-limit-factor", "100");
+    conf.set(PREFIX + "maximum-am-resource-percent", "1");
+
+    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+    conf.set(YarnConfiguration.RM_AM_MAX_ATTEMPTS, "1");
+    conf.set(CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, "1000");
+    conf.set(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, "1000");
+    conf.set(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND, "0.5");
+    conf.set(CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR, "1");
+
+  }
+
+  @Test
+  public void testPriorityPreemptionWithNodeLabels() throws Exception {
+    // set up queue priority and capacity
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+
+    initializeConfProperties(conf);
+
+    MockRM rm1 = new MockRM(conf) {
+      protected RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+    rm1.start();
+
+    MockNM[] mockNMs = new MockNM[NUM_NM];
+    for (int i = 0; i < NUM_NM; i++) {
+      mockNMs[i] = rm1.registerNode("h" + i + ":1234", 6144);
+    }
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+    mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("x")));
+
+    RMNode[] rmNodes = new RMNode[5];
+    for (int i = 0; i < NUM_NM; i++) {
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(mockNMs[i].getNodeId());
+      mgr.replaceLabelsOnNode(
+          ImmutableMap.of(rmNodes[i].getNodeID(), ImmutableSet.of("x")));
+    }
+
+    // Launch an app in queue B; its AM container is launched on nm4
+    RMApp app1 = rm1.submitApp(4096, "app", "user", null, "B");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, mockNMs[4]);
+
+    am1.allocate("*", 4096, NUM_NM-1, new ArrayList<>());
+
+    // Do allocation for nm0-nm3
+    for (int i = 0; i < NUM_NM-1; i++) {
+      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
+    }
+
+    // App1 should have 5 containers now, one for each node
+    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+        am1.getApplicationAttemptId());
+    Assert.assertEquals(NUM_NM, schedulerApp1.getLiveContainers().size());
+    for (int i = 0; i < NUM_NM; i++) {
+      waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(
+          rmNodes[i].getNodeID()), am1.getApplicationAttemptId(), 1);
+    }
+
+    // Submit app2 to queue A; it asks for a 1024MB AM container (on n0)
+    RMApp app2 = rm1.submitApp(1024, "app", "user", null, "A");
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, mockNMs[0]);
+    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
+
+    // Ask for NUM_NM-1 containers of 2048MB each
+    am2.allocate("*", 2048, NUM_NM-1, new ArrayList<>());
+
+    // Do allocation for n1-n4
+    for (int i = 1; i < NUM_NM; i++) {
+      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
+    }
+
+    // kill app1
+    rm1.killApp(app1.getApplicationId());
+
+    // Submit app3 to queue B; it asks for a 1024MB AM container (on n2)
+    RMApp app3 = rm1.submitApp(1024, "app", "user", null, "B");
+    MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, mockNMs[2]);
+    FiCaSchedulerApp schedulerApp3 = cs.getApplicationAttempt(
+        ApplicationAttemptId.newInstance(app3.getApplicationId(), 1));
+
+    // Ask for NUM_NM containers of 5120MB each
+    am3.allocate("*", 5120, NUM_NM, new ArrayList<>());
+
+    // Do allocation for n0-n4
+    for (int i = 0; i < NUM_NM; i++) {
+      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
+    }
+
+    // Sleep for the timeout interval; we should then see 2 containers selected
+    Thread.sleep(1000);
+
+    SchedulingMonitorManager smm = ((CapacityScheduler) rm1.
+        getResourceScheduler()).getSchedulingMonitorManager();
+    SchedulingMonitor smon = smm.getAvailableSchedulingMonitor();
+    ProportionalCapacityPreemptionPolicy editPolicy =
+        (ProportionalCapacityPreemptionPolicy) smon.getSchedulingEditPolicy();
+    editPolicy.editSchedule();
+
+    // We should only be allowed to preempt 2 containers, on node1 and node2
+    Set<RMContainer> selectedToPreempt =
+        editPolicy.getToPreemptContainers().keySet();
+    Assert.assertEquals(2, selectedToPreempt.size());
+    List<NodeId> selectedToPreemptNodeIds = new ArrayList<>();
+    for (RMContainer rmc : selectedToPreempt) {
+      selectedToPreemptNodeIds.add(rmc.getAllocatedNode());
+    }
+    assertThat(selectedToPreemptNodeIds, CoreMatchers.hasItems(
+        mockNMs[1].getNodeId(), mockNMs[2].getNodeId()));
+
+    rm1.close();
+
+  }
 
   @Test(timeout = 60000)
   public void testPreemptionForFragmentatedCluster() throws Exception {
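
Condensed, the core idiom of the new test is to bypass the background monitor thread and run one preemption round synchronously. A hedged sketch of that pattern (package paths assumed from the trunk layout; the MockRM is set up elsewhere, as in the test above):

import java.util.Set;

import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitorManager;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;

public final class PreemptionRoundSketch {
  // Run one synchronous pass of the preemption editor and return the
  // containers it marked for preemption, as the assertions above inspect.
  static Set<RMContainer> runOnePreemptionRound(MockRM rm) {
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    SchedulingMonitorManager smm = cs.getSchedulingMonitorManager();
    SchedulingMonitor monitor = smm.getAvailableSchedulingMonitor();
    ProportionalCapacityPreemptionPolicy policy =
        (ProportionalCapacityPreemptionPolicy) monitor.getSchedulingEditPolicy();
    policy.editSchedule();
    return policy.getToPreemptContainers().keySet();
  }
}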

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java

@@ -66,6 +66,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
     conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
         FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+    conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+        1000L);
   }
 
   @After

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md

@@ -138,7 +138,7 @@ New configuration parameters that are introduced with v.2 are marked bold.
 | `yarn.timeline-service.reader.bind-host` | The actual address the timeline reader will bind to. If this optional address is set, reader server will bind to this address and the port specified in yarn.timeline-service.reader.webapp.address. This is most useful for making the service listen on all interfaces by setting to 0.0.0.0. |
 | **`yarn.timeline-service.hbase.configuration.file`** | Optional URL to an hbase-site.xml configuration file to be used to connect to the timeline-service hbase cluster. If empty or not specified, then the HBase configuration will be loaded from the classpath. When specified the values in the specified configuration file will override those from the ones that are present on the classpath. Defaults to `null`. |
 | **`yarn.timeline-service.writer.flush-interval-seconds`** | The setting that controls how often the timeline collector flushes the timeline writer. Defaults to `60`. |
-| **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period till which the application collector will be alive in NM, after the  application master container finishes. Defaults to `1000` (1 second). |
+| **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period till which the application collector will be alive in NM, after the application master container finishes. Defaults to `60000` (60 seconds). |
 | **`yarn.timeline-service.timeline-client.number-of-async-entities-to-merge`** | Time line V2 client tries to merge these many number of async entities (if available) and then call the REST ATS V2 API to submit. Defaults to `10`. |
 | **`yarn.timeline-service.hbase.coprocessor.app-final-value-retention-milliseconds`** | The setting that controls how long the final value of a metric of a completed app is retained before merging into the flow sum. Defaults to `259200000` (3 days). This should be set in the HBase cluster. |
 | **`yarn.rm.system-metrics-publisher.emit-container-events`** | The setting that controls whether yarn container metrics is published to the timeline server or not by RM. This configuration setting is for ATS V2. Defaults to `false`. |
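
Tests that exercise collector shutdown would otherwise wait out the new one-minute default, which is why the TestPerNodeTimelineCollectorsAuxService setup above pins the period back to one second. A minimal sketch of the same programmatic override for any test or embedded client:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ShortLingerSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Restore the pre-commit 1-second linger so collector-removal paths run
    // quickly in tests; production clusters should keep the 60s default.
    conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS, 1000L);
    System.out.println(conf.get(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS));
  }
}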

+ 27 - 0
pom.xml

@@ -79,6 +79,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
   </organization>
 
   <properties>
+    <!-- required, as child projects with a different version can't use ${project.version} -->
+    <hadoop.version>3.2.0-SNAPSHOT</hadoop.version>
+
     <distMgmtSnapshotsId>apache.snapshots.https</distMgmtSnapshotsId>
     <distMgmtSnapshotsName>Apache Development Snapshot Repository</distMgmtSnapshotsName>
     <distMgmtSnapshotsUrl>https://repository.apache.org/content/repositories/snapshots</distMgmtSnapshotsUrl>
@@ -421,6 +424,30 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         <artifactId>dependency-check-maven</artifactId>
         <version>${dependency-check-maven.version}</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>${maven-enforcer-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>enforce-property</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <requireProperty>
+                  <property>hadoop.version</property>
+                  <message>You must set a hadoop.version to be the same as ${project.version}</message>
+                  <regex>${project.version}</regex>
+                  <regexMessage>The hadoop.version property should be set and should be ${project.version}.</regexMessage>
+                </requireProperty>
+              </rules>
+              <fail>true</fail>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
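
The enforcer execution added above guards the new property: the build fails unless hadoop.version is set and matches project.version (the <regex> element is interpolated to the literal project version before matching). A plain-Java sketch of the invariant the rule encodes:

import java.util.Properties;
import java.util.regex.Pattern;

public class EnforceHadoopVersionSketch {
  // Hedged sketch of the requireProperty rule: hadoop.version must be set
  // and must equal project.version, otherwise the build is failed.
  static void check(Properties props, String projectVersion) {
    String hadoopVersion = props.getProperty("hadoop.version");
    if (hadoopVersion == null
        || !Pattern.matches(Pattern.quote(projectVersion), hadoopVersion)) {
      throw new IllegalStateException(
          "The hadoop.version property should be set and should be " + projectVersion);
    }
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("hadoop.version", "3.2.0-SNAPSHOT");
    check(props, "3.2.0-SNAPSHOT"); // passes; any mismatch throws
  }
}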