
revert cd5138739ad500fc1885ba77d5c949e9aa395a53 (MAPREDUCE-5844) and 956e023bc04d6cb61ac99b39006bccf4603722be (MAPREDUCE-5825) as MAPREDUCE-5900 doesn't need them

Wangda Tan, 11 years ago
Parent commit: c87f1a050c

+ 0 - 50
hadoop-mapreduce-project/CHANGES.txt

@@ -29,57 +29,7 @@ Release 2.5.0 - UNRELEASED
     MAPREDUCE-5825. Provide diagnostics for reducers killed during ramp down
     (Gera Shegalov via jlowe)
 
-<<<<<<< HEAD
 >>>>>>> 10a3185... MAPREDUCE-5825. Provide diagnostics for reducers killed during ramp down. Contributed by Gera Shegalov
-=======
-    MAPREDUCE-5836. Fix typo in RandomTextWriter (Akira AJISAKA via jeagles)
-
-    MAPREDUCE-5852. Prepare MapReduce codebase for JUnit 4.11. (cnauroth)
-
-    MAPREDUCE-5639. Port DistCp2 document to trunk (Akira AJISAKA via jeagles)
-
-    MAPREDUCE-5812. Make job context available to
-    OutputCommitter.isRecoverySupported() (Mohammad Kamrul Islam via jlowe)
-
-    MAPREDUCE-5638. Port Hadoop Archives document to trunk (Akira AJISAKA via
-    jeagles)
-
-    MAPREDUCE-5402. In DynamicInputFormat, change MAX_CHUNKS_TOLERABLE,
-    MAX_CHUNKS_IDEAL, MIN_RECORDS_PER_CHUNK and SPLIT_RATIO to be configurable.
-    (Tsuyoshi OZAWA via szetszwo)
-
-    MAPREDUCE-5637. Convert Hadoop Streaming document to APT (Akira AJISAKA via
-    jeagles)
-
-    MAPREDUCE-5636. Convert MapReduce Tutorial document to APT (Akira AJISAKA
-    via jeagles)
-
-    MAPREDUCE-5774. Job overview in History UI should list reducer phases in
-    chronological order. (Gera Shegalov via kasha)
-
-    MAPREDUCE-5652. NM Recovery. ShuffleHandler should handle NM restarts.
-    (Jason Lowe via kasha)
-
-    MAPREDUCE-5861. finishedSubMaps field in LocalContainerLauncher does not 
-    need to be volatile. (Tsuyoshi OZAWA via junping_du)
-
-    MAPREDUCE-5809. Enhance distcp to support preserving HDFS ACLs. (cnauroth)
-
-    MAPREDUCE-5899. Support incremental data copy in DistCp. (jing9)
-
-    MAPREDUCE-5886. Allow wordcount example job to accept multiple input paths.
-    (cnauroth)
-
-    MAPREDUCE-5834. Increased test-timeouts in TestGridMixClasses to avoid
-    occassional failures. (Mit Desai via vinodkv)
-
-    MAPREDUCE-5896. InputSplits should indicate which locations have the block 
-    cached in memory. (Sandy Ryza via kasha)
-
-    MAPREDUCE-5844. Add a configurable delay to reducer-preemption. 
-    (Maysam Yabandeh via kasha)
-
->>>>>>> a29b1c2... MAPREDUCE-5844. Add a configurable delay to reducer-preemption. (Maysam Yabandeh via kasha)
   OPTIMIZATIONS
 
   BUG FIXES 

+ 2 - 2
hadoop-mapreduce-project/dev-support/findbugs-exclude.xml

@@ -470,8 +470,8 @@
    <Match>
      <Class name="org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator" />
      <Or>
-      <Field name="mapResourceRequest" />
-      <Field name="reduceResourceRequest" />
+      <Field name="mapResourceReqt" />
+      <Field name="reduceResourceReqt" />
       <Field name="maxReduceRampupLimit" />
       <Field name="reduceSlowStart" />
      </Or>

+ 46 - 108
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java

@@ -71,7 +71,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.client.api.NMTokenCache;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.RackResolver;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -142,21 +141,15 @@ public class RMContainerAllocator extends RMContainerRequestor
   private int lastCompletedTasks = 0;
   
   private boolean recalculateReduceSchedule = false;
-  private int mapResourceRequest;//memory
-  private int reduceResourceRequest;//memory
+  private int mapResourceReqt;//memory
+  private int reduceResourceReqt;//memory
   
   private boolean reduceStarted = false;
   private float maxReduceRampupLimit = 0;
   private float maxReducePreemptionLimit = 0;
-  /**
-   * after this threshold, if the container request is not allocated, it is
-   * considered delayed.
-   */
-  private long allocationDelayThresholdMs = 0;
   private float reduceSlowStart = 0;
   private long retryInterval;
   private long retrystartTime;
-  private Clock clock;
 
   @VisibleForTesting
   protected BlockingQueue<ContainerAllocatorEvent> eventQueue
@@ -167,7 +160,6 @@ public class RMContainerAllocator extends RMContainerRequestor
   public RMContainerAllocator(ClientService clientService, AppContext context) {
     super(clientService, context);
     this.stopped = new AtomicBoolean(false);
-    this.clock = context.getClock();
   }
 
   @Override
@@ -182,9 +174,6 @@ public class RMContainerAllocator extends RMContainerRequestor
     maxReducePreemptionLimit = conf.getFloat(
         MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
         MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
-    allocationDelayThresholdMs = conf.getInt(
-        MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
-        MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
     RackResolver.init(conf);
     retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
                                 MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
@@ -251,7 +240,7 @@ public class RMContainerAllocator extends RMContainerRequestor
           getJob().getTotalMaps(), completedMaps,
           scheduledRequests.maps.size(), scheduledRequests.reduces.size(), 
           assignedRequests.maps.size(), assignedRequests.reduces.size(),
-          mapResourceRequest, reduceResourceRequest,
+          mapResourceReqt, reduceResourceReqt,
           pendingReduces.size(), 
           maxReduceRampupLimit, reduceSlowStart);
       recalculateReduceSchedule = false;
@@ -273,18 +262,6 @@ public class RMContainerAllocator extends RMContainerRequestor
     scheduleStats.log("Final Stats: ");
   }
 
-  @Private
-  @VisibleForTesting
-  AssignedRequests getAssignedRequests() {
-    return assignedRequests;
-  }
-
-  @Private
-  @VisibleForTesting
-  ScheduledRequests getScheduledRequests() {
-    return scheduledRequests;
-  }
-
   public boolean getIsReduceStarted() {
     return reduceStarted;
   }
@@ -320,16 +297,16 @@ public class RMContainerAllocator extends RMContainerRequestor
       int supportedMaxContainerCapability =
           getMaxContainerCapability().getMemory();
       if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
-        if (mapResourceRequest == 0) {
-          mapResourceRequest = reqEvent.getCapability().getMemory();
+        if (mapResourceReqt == 0) {
+          mapResourceReqt = reqEvent.getCapability().getMemory();
           eventHandler.handle(new JobHistoryEvent(jobId, 
               new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
-                  mapResourceRequest)));
-          LOG.info("mapResourceRequest:"+ mapResourceRequest);
-          if (mapResourceRequest > supportedMaxContainerCapability) {
+              mapResourceReqt)));
+          LOG.info("mapResourceReqt:"+mapResourceReqt);
+          if (mapResourceReqt > supportedMaxContainerCapability) {
             String diagMsg = "MAP capability required is more than the supported " +
-            "max container capability in the cluster. Killing the Job. mapResourceRequest: " +
-                mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability;
+            "max container capability in the cluster. Killing the Job. mapResourceReqt: " + 
+            mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
             LOG.info(diagMsg);
             eventHandler.handle(new JobDiagnosticsUpdateEvent(
                 jobId, diagMsg));
@@ -337,20 +314,20 @@ public class RMContainerAllocator extends RMContainerRequestor
           }
         }
         //set the rounded off memory
-        reqEvent.getCapability().setMemory(mapResourceRequest);
+        reqEvent.getCapability().setMemory(mapResourceReqt);
         scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
       } else {
-        if (reduceResourceRequest == 0) {
-          reduceResourceRequest = reqEvent.getCapability().getMemory();
+        if (reduceResourceReqt == 0) {
+          reduceResourceReqt = reqEvent.getCapability().getMemory();
           eventHandler.handle(new JobHistoryEvent(jobId, 
               new NormalizedResourceEvent(
                   org.apache.hadoop.mapreduce.TaskType.REDUCE,
-                  reduceResourceRequest)));
-          LOG.info("reduceResourceRequest:"+ reduceResourceRequest);
-          if (reduceResourceRequest > supportedMaxContainerCapability) {
+              reduceResourceReqt)));
+          LOG.info("reduceResourceReqt:"+reduceResourceReqt);
+          if (reduceResourceReqt > supportedMaxContainerCapability) {
             String diagMsg = "REDUCE capability required is more than the " +
             		"supported max container capability in the cluster. Killing the " +
-            		"Job. reduceResourceRequest: " + reduceResourceRequest +
+            		"Job. reduceResourceReqt: " + reduceResourceReqt +
             		" maxContainerCapability:" + supportedMaxContainerCapability;
             LOG.info(diagMsg);
             eventHandler.handle(new JobDiagnosticsUpdateEvent(
@@ -359,7 +336,7 @@ public class RMContainerAllocator extends RMContainerRequestor
           }
         }
         //set the rounded off memory
-        reqEvent.getCapability().setMemory(reduceResourceRequest);
+        reqEvent.getCapability().setMemory(reduceResourceReqt);
         if (reqEvent.getEarlierAttemptFailed()) {
           //add to the front of queue for fail fast
           pendingReduces.addFirst(new ContainerRequest(reqEvent, PRIORITY_REDUCE));
@@ -407,22 +384,8 @@ public class RMContainerAllocator extends RMContainerRequestor
     return host;
   }
 
-  @Private
-  @VisibleForTesting
-  synchronized void setReduceResourceRequest(int mem) {
-    this.reduceResourceRequest = mem;
-  }
-
-  @Private
-  @VisibleForTesting
-  synchronized void setMapResourceRequest(int mem) {
-    this.mapResourceRequest = mem;
-  }
-
-  @Private
-  @VisibleForTesting
-  void preemptReducesIfNeeded() {
-    if (reduceResourceRequest == 0) {
+  private void preemptReducesIfNeeded() {
+    if (reduceResourceReqt == 0) {
       return; //no reduces
     }
     //check if reduces have taken over the whole cluster and there are 
@@ -430,9 +393,9 @@ public class RMContainerAllocator extends RMContainerRequestor
     if (scheduledRequests.maps.size() > 0) {
       int memLimit = getMemLimit();
       int availableMemForMap = memLimit - ((assignedRequests.reduces.size() -
-          assignedRequests.preemptionWaitingReduces.size()) * reduceResourceRequest);
+          assignedRequests.preemptionWaitingReduces.size()) * reduceResourceReqt);
       //availableMemForMap must be sufficient to run atleast 1 map
-      if (availableMemForMap < mapResourceRequest) {
+      if (availableMemForMap < mapResourceReqt) {
         //to make sure new containers are given to maps and not reduces
         //ramp down all scheduled reduces if any
         //(since reduces are scheduled at higher priority than maps)
@@ -442,39 +405,21 @@ public class RMContainerAllocator extends RMContainerRequestor
         }
         scheduledRequests.reduces.clear();
         
-        //do further checking to find the number of map requests that were
-        //hanging around for a while
-        int hangingMapRequests = getNumOfHangingRequests(scheduledRequests.maps);
-        if (hangingMapRequests > 0) {
-          //preempt for making space for at least one map
-          int premeptionLimit = Math.max(mapResourceRequest,
-              (int) (maxReducePreemptionLimit * memLimit));
-
-          int preemptMem = Math.min(hangingMapRequests * mapResourceRequest,
-              premeptionLimit);
-
-          int toPreempt = (int) Math.ceil((float) preemptMem / reduceResourceRequest);
-          toPreempt = Math.min(toPreempt, assignedRequests.reduces.size());
-
-          LOG.info("Going to preempt " + toPreempt + " due to lack of space for maps");
-          assignedRequests.preemptReduce(toPreempt);
-        }
+        //preempt for making space for atleast one map
+        int premeptionLimit = Math.max(mapResourceReqt, 
+            (int) (maxReducePreemptionLimit * memLimit));
+        
+        int preemptMem = Math.min(scheduledRequests.maps.size() * mapResourceReqt, 
+            premeptionLimit);
+        
+        int toPreempt = (int) Math.ceil((float) preemptMem/reduceResourceReqt);
+        toPreempt = Math.min(toPreempt, assignedRequests.reduces.size());
+        
+        LOG.info("Going to preempt " + toPreempt);
+        assignedRequests.preemptReduce(toPreempt);
       }
     }
   }
-
-  private int getNumOfHangingRequests(Map<TaskAttemptId, ContainerRequest> requestMap) {
-    if (allocationDelayThresholdMs <= 0)
-      return requestMap.size();
-    int hangingRequests = 0;
-    long currTime = clock.getTime();
-    for (ContainerRequest request: requestMap.values()) {
-      long delay = currTime - request.requestTimeMs;
-      if (delay > allocationDelayThresholdMs)
-        hangingRequests++;
-    }
-    return hangingRequests;
-  }
   
   @Private
   public void scheduleReduces(
@@ -751,13 +696,11 @@ public class RMContainerAllocator extends RMContainerRequestor
   @Private
   public int getMemLimit() {
     int headRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;
-    return headRoom + assignedRequests.maps.size() * mapResourceRequest +
-       assignedRequests.reduces.size() * reduceResourceRequest;
+    return headRoom + assignedRequests.maps.size() * mapResourceReqt + 
+       assignedRequests.reduces.size() * reduceResourceReqt;
   }
-
-  @Private
-  @VisibleForTesting
-  class ScheduledRequests {
+  
+  private class ScheduledRequests {
     
     private final LinkedList<TaskAttemptId> earlierFailedMaps = 
       new LinkedList<TaskAttemptId>();
@@ -767,8 +710,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       new HashMap<String, LinkedList<TaskAttemptId>>();
     private final Map<String, LinkedList<TaskAttemptId>> mapsRackMapping = 
       new HashMap<String, LinkedList<TaskAttemptId>>();
-    @VisibleForTesting
-    final Map<TaskAttemptId, ContainerRequest> maps =
+    private final Map<TaskAttemptId, ContainerRequest> maps = 
       new LinkedHashMap<TaskAttemptId, ContainerRequest>();
     
     private final LinkedHashMap<TaskAttemptId, ContainerRequest> reduces = 
@@ -864,22 +806,22 @@ public class RMContainerAllocator extends RMContainerRequestor
         int allocatedMemory = allocated.getResource().getMemory();
         if (PRIORITY_FAST_FAIL_MAP.equals(priority) 
             || PRIORITY_MAP.equals(priority)) {
-          if (allocatedMemory < mapResourceRequest
+          if (allocatedMemory < mapResourceReqt
               || maps.isEmpty()) {
             LOG.info("Cannot assign container " + allocated 
                 + " for a map as either "
-                + " container memory less than required " + mapResourceRequest
+                + " container memory less than required " + mapResourceReqt
                 + " or no pending map tasks - maps.isEmpty=" 
                 + maps.isEmpty()); 
             isAssignable = false; 
           }
         } 
         else if (PRIORITY_REDUCE.equals(priority)) {
-          if (allocatedMemory < reduceResourceRequest
+          if (allocatedMemory < reduceResourceReqt
               || reduces.isEmpty()) {
             LOG.info("Cannot assign container " + allocated 
                 + " for a reduce as either "
-                + " container memory less than required " + reduceResourceRequest
+                + " container memory less than required " + reduceResourceReqt
                 + " or no pending reduce tasks - reduces.isEmpty=" 
                 + reduces.isEmpty()); 
             isAssignable = false;
@@ -1158,18 +1100,14 @@ public class RMContainerAllocator extends RMContainerRequestor
     }
   }
 
-  @Private
-  @VisibleForTesting
-  class AssignedRequests {
+  private class AssignedRequests {
     private final Map<ContainerId, TaskAttemptId> containerToAttemptMap =
       new HashMap<ContainerId, TaskAttemptId>();
     private final LinkedHashMap<TaskAttemptId, Container> maps = 
       new LinkedHashMap<TaskAttemptId, Container>();
-    @VisibleForTesting
-    final LinkedHashMap<TaskAttemptId, Container> reduces =
+    private final LinkedHashMap<TaskAttemptId, Container> reduces = 
       new LinkedHashMap<TaskAttemptId, Container>();
-    @VisibleForTesting
-    final Set<TaskAttemptId> preemptionWaitingReduces =
+    private final Set<TaskAttemptId> preemptionWaitingReduces = 
       new HashSet<TaskAttemptId>();
     
     void add(Container container, TaskAttemptId tId) {
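For context, here is a minimal standalone sketch of the reducer-preemption sizing this revert restores (the simpler, pre-MAPREDUCE-5844 path shown in the `+` lines above). The class and the sample numbers are illustrative only; the arithmetic mirrors the restored bodies of preemptReducesIfNeeded and getMemLimit.

```java
/**
 * Illustrative sketch of the restored reducer-preemption sizing: every
 * pending map request counts immediately, with no delay threshold.
 * Values are made up; the arithmetic mirrors the + lines above.
 */
public class PreemptionSizingSketch {
  public static void main(String[] args) {
    int mapResourceReqt = 1024;         // MB per map container
    int reduceResourceReqt = 1024;      // MB per reduce container
    int assignedMaps = 0;               // maps already running
    int assignedReduces = 4;            // reduces already running
    int preemptionWaitingReduces = 0;   // reduces already marked for preemption
    int scheduledMaps = 3;              // pending map requests
    int headRoom = 0;                   // free memory reported by the RM
    float maxReducePreemptionLimit = 0.5f;

    // getMemLimit(): headroom plus memory of everything already assigned
    int memLimit = headRoom + assignedMaps * mapResourceReqt
        + assignedReduces * reduceResourceReqt;
    int availableMemForMap = memLimit
        - (assignedReduces - preemptionWaitingReduces) * reduceResourceReqt;

    if (availableMemForMap < mapResourceReqt) {
      // Preempt enough reduces for the pending maps, capped by the ramp-down limit.
      int preemptionLimit = Math.max(mapResourceReqt,
          (int) (maxReducePreemptionLimit * memLimit));
      int preemptMem = Math.min(scheduledMaps * mapResourceReqt, preemptionLimit);
      int toPreempt = (int) Math.ceil((float) preemptMem / reduceResourceReqt);
      toPreempt = Math.min(toPreempt, assignedReduces);
      System.out.println("Would preempt " + toPreempt + " reduce container(s)");
    }
  }
}
```

With these sample numbers the sketch preempts 2 of the 4 running reduces: the cap of half the memory limit (2048 MB) is smaller than the 3072 MB requested by the pending maps.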

+ 4 - 27
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java

@@ -29,10 +29,8 @@ import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -98,8 +96,6 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     super(clientService, context);
   }
 
-  @Private
-  @VisibleForTesting
   static class ContainerRequest {
     final TaskAttemptId attemptID;
     final Resource capability;
@@ -107,39 +103,20 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     final String[] racks;
     //final boolean earlierAttemptFailed;
     final Priority priority;
-    /**
-     * the time when this request object was formed; can be used to avoid
-     * aggressive preemption for recently placed requests
-     */
-    final long requestTimeMs;
-
+    
     public ContainerRequest(ContainerRequestEvent event, Priority priority) {
       this(event.getAttemptID(), event.getCapability(), event.getHosts(),
           event.getRacks(), priority);
     }
-
-    public ContainerRequest(ContainerRequestEvent event, Priority priority,
-                            long requestTimeMs) {
-      this(event.getAttemptID(), event.getCapability(), event.getHosts(),
-          event.getRacks(), priority, requestTimeMs);
-    }
-
-    public ContainerRequest(TaskAttemptId attemptID,
-                            Resource capability, String[] hosts, String[] racks,
-                            Priority priority) {
-      this(attemptID, capability, hosts, racks, priority,
-          System.currentTimeMillis());
-    }
-
+    
     public ContainerRequest(TaskAttemptId attemptID,
-        Resource capability, String[] hosts, String[] racks,
-        Priority priority, long requestTimeMs) {
+        Resource capability, String[] hosts, String[] racks, 
+        Priority priority) {
       this.attemptID = attemptID;
       this.capability = capability;
       this.hosts = hosts;
       this.racks = racks;
       this.priority = priority;
-      this.requestTimeMs = requestTimeMs;
     }
     
     public String toString() {
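For contrast, a sketch of the delayed-preemption counting that this revert removes, adapted from the deleted requestTimeMs field above and the getNumOfHangingRequests method removed from RMContainerAllocator. The plain map of timestamps is a stand-in for the real Map<TaskAttemptId, ContainerRequest>.

```java
import java.util.HashMap;
import java.util.Map;

/**
 * Sketch of the removed behaviour: only map requests that have been waiting
 * longer than the configured threshold counted toward reducer preemption.
 * Map<String, Long> of request timestamps stands in for the real request map.
 */
public class HangingRequestSketch {
  static int countHangingRequests(Map<String, Long> requestTimesMs,
                                  long nowMs, long thresholdMs) {
    if (thresholdMs <= 0) {
      return requestTimesMs.size(); // threshold disabled: every request counts
    }
    int hanging = 0;
    for (long requestTimeMs : requestTimesMs.values()) {
      if (nowMs - requestTimeMs > thresholdMs) {
        hanging++;
      }
    }
    return hanging;
  }

  public static void main(String[] args) {
    Map<String, Long> requests = new HashMap<>();
    requests.put("map_0", 1_000L);  // requested at t = 1 s
    requests.put("map_1", 9_500L);  // requested at t = 9.5 s
    // At t = 10 s with a 2 s threshold, only map_0 has waited long enough.
    System.out.println(countHangingRequests(requests, 10_000L, 2_000L)); // prints 1
  }
}
```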

+ 5 - 115
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java → hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java

@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.mapreduce.v2.app.rm;
+package org.apache.hadoop.mapreduce.v2.app;
 
 import static org.mockito.Matchers.anyFloat;
 import static org.mockito.Matchers.anyInt;
@@ -52,10 +52,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.AppContext;
-import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
-import org.apache.hadoop.mapreduce.v2.app.ControlledClock;
-import org.apache.hadoop.mapreduce.v2.app.MRApp;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
@@ -69,6 +65,10 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -79,7 +79,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -419,115 +418,6 @@ public class TestRMContainerAllocator {
         killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
   }
 
-  @Test(timeout = 30000)
-  public void testPreemptReducers() throws Exception {
-    LOG.info("Running testPreemptReducers");
-
-    Configuration conf = new Configuration();
-    MyResourceManager rm = new MyResourceManager(conf);
-    rm.start();
-    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
-        .getDispatcher();
-
-    // Submit the application
-    RMApp app = rm.submitApp(1024);
-    dispatcher.await();
-
-    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
-    amNodeManager.nodeHeartbeat(true);
-    dispatcher.await();
-
-    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
-        .getAppAttemptId();
-    rm.sendAMLaunched(appAttemptId);
-    dispatcher.await();
-
-    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
-    Job mockJob = mock(Job.class);
-    when(mockJob.getReport()).thenReturn(
-        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
-    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
-        appAttemptId, mockJob, new SystemClock());
-    allocator.setMapResourceRequest(1024);
-    allocator.setReduceResourceRequest(1024);
-    RMContainerAllocator.AssignedRequests assignedRequests =
-        allocator.getAssignedRequests();
-    RMContainerAllocator.ScheduledRequests scheduledRequests =
-        allocator.getScheduledRequests();
-    ContainerRequestEvent event1 =
-        createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
-    scheduledRequests.maps.put(mock(TaskAttemptId.class),
-        new RMContainerRequestor.ContainerRequest(event1, null));
-    assignedRequests.reduces.put(mock(TaskAttemptId.class),
-        mock(Container.class));
-
-    allocator.preemptReducesIfNeeded();
-    Assert.assertEquals("The reducer is not preempted",
-        1, assignedRequests.preemptionWaitingReduces.size());
-  }
-
-  @Test(timeout = 30000)
-  public void testNonAggressivelyPreemptReducers() throws Exception {
-    LOG.info("Running testPreemptReducers");
-
-    final int preemptThreshold = 2; //sec
-    Configuration conf = new Configuration();
-    conf.setInt(
-        MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
-        preemptThreshold);
-
-    MyResourceManager rm = new MyResourceManager(conf);
-    rm.start();
-    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
-        .getDispatcher();
-
-    // Submit the application
-    RMApp app = rm.submitApp(1024);
-    dispatcher.await();
-
-    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
-    amNodeManager.nodeHeartbeat(true);
-    dispatcher.await();
-
-    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
-        .getAppAttemptId();
-    rm.sendAMLaunched(appAttemptId);
-    dispatcher.await();
-
-    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
-    Job mockJob = mock(Job.class);
-    when(mockJob.getReport()).thenReturn(
-        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
-            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
-    ControlledClock clock = new ControlledClock(null);
-    clock.setTime(1);
-    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
-        appAttemptId, mockJob, clock);
-    allocator.setMapResourceRequest(1024);
-    allocator.setReduceResourceRequest(1024);
-    RMContainerAllocator.AssignedRequests assignedRequests =
-        allocator.getAssignedRequests();
-    RMContainerAllocator.ScheduledRequests scheduledRequests =
-        allocator.getScheduledRequests();
-    ContainerRequestEvent event1 =
-        createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
-    scheduledRequests.maps.put(mock(TaskAttemptId.class),
-        new RMContainerRequestor.ContainerRequest(event1, null, clock.getTime()));
-    assignedRequests.reduces.put(mock(TaskAttemptId.class),
-        mock(Container.class));
-
-    clock.setTime(clock.getTime() + 1);
-    allocator.preemptReducesIfNeeded();
-    Assert.assertEquals("The reducer is aggressively preeempted", 0,
-        assignedRequests.preemptionWaitingReduces.size());
-
-    clock.setTime(clock.getTime() + (preemptThreshold) * 1000);
-    allocator.preemptReducesIfNeeded();
-    Assert.assertEquals("The reducer is not preeempted", 1,
-        assignedRequests.preemptionWaitingReduces.size());
-  }
-
   @Test
   public void testMapReduceScheduling() throws Exception {
 

+ 1 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java

@@ -568,17 +568,7 @@ public interface MRJobConfig {
       MR_AM_PREFIX + "history.use-batched-flush.queue-size.threshold";
   public static final int DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD =
       50;
-
-  /**
-   * The threshold in terms of seconds after which an unsatisfied mapper request
-   * triggers reducer preemption to free space. Default 0 implies that the reduces
-   * should be preempted immediately after allocation if there is currently no
-   * room for newly allocated mappers.
-   */
-  public static final String MR_JOB_REDUCER_PREEMPT_DELAY_SEC =
-      "mapreduce.job.reducer.preempt.delay.sec";
-  public static final int DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC = 0;
-
+  
   public static final String MR_AM_ENV =
       MR_AM_PREFIX + "env";
   

+ 0 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml

@@ -268,17 +268,6 @@
   </description>
 </property>
 
-<property>
-  <name>mapreduce.job.reducer.preempt.delay.sec</name>
-  <value>0</value>
-  <description>The threshold in terms of seconds after which an unsatisfied mapper 
-  request triggers reducer preemption to free space. Default 0 implies that the 
-  reduces should be preempted immediately after allocation if there is currently no
-  room for newly allocated mappers.
->>>>>>> a29b1c2... MAPREDUCE-5844. Add a configurable delay to reducer-preemption. (Maysam Yabandeh via kasha)
-  </description>
-</property>
-
 <property>
   <name>mapreduce.jobtracker.jobhistory.block.size</name>
   <value>3145728</value>
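For reference, a minimal sketch of how the property deleted above was consumed before this revert, mirroring the conf.getInt(...) * 1000 call removed from RMContainerAllocator and the constants removed from MRJobConfig. After the revert the key simply has no effect.

```java
import org.apache.hadoop.conf.Configuration;

/**
 * Sketch of reading the now-removed delay setting. The constants copy the
 * values deleted from MRJobConfig by this commit; after the revert, setting
 * the key does nothing.
 */
public class ReducerPreemptDelaySketch {
  static final String MR_JOB_REDUCER_PREEMPT_DELAY_SEC =
      "mapreduce.job.reducer.preempt.delay.sec";
  static final int DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC = 0;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(MR_JOB_REDUCER_PREEMPT_DELAY_SEC, 2); // seconds

    long allocationDelayThresholdMs = conf.getInt(
        MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
        DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000L; // sec -> ms
    System.out.println("Delay threshold: " + allocationDelayThresholdMs + " ms");
  }
}
```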