
Reverting changes to do rack resolution in the RM and in the AM (ddas)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-279@1135878 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar · 14 years ago
Commit d2d94248e2

+ 0 - 1
mapreduce/CHANGES.txt

@@ -5,7 +5,6 @@ Trunk (unreleased changes)
 
     MAPREDUCE-279
 
-
     MAPREDUCE-2569. Ensure root queue allocated 100% capacity. (Jonathan
     Eagles via cdouglas)
 

+ 1 - 7
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -114,7 +114,6 @@ import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.RackResolver;
 
 /**
  * Implementation of TaskAttempt interface.
@@ -418,7 +417,6 @@ public abstract class TaskAttemptImpl implements
     this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
     this.resourceCapability.setMemory(getMemoryRequired(conf, taskId.getTaskType()));
     this.dataLocalHosts = dataLocalHosts;
-    RackResolver.init(conf);
 
     // This "this leak" is okay because the retained pointer is in an
     //  instance variable.
@@ -874,6 +872,7 @@ public abstract class TaskAttemptImpl implements
     return tauce;
   }
 
+  private static String[] racks = new String[] {NetworkTopology.DEFAULT_RACK};
   private static class RequestContainerTransition implements
       SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
     boolean rescheduled = false;
@@ -893,11 +892,6 @@ public abstract class TaskAttemptImpl implements
                 taskAttempt.attemptId, 
                 taskAttempt.resourceCapability));
       } else {
-        int i = 0;
-        String[] racks = new String[taskAttempt.dataLocalHosts.length];
-        for (String host : taskAttempt.dataLocalHosts) {
-          racks[i++] = RackResolver.resolve(host).getNetworkLocation();
-        }
        taskAttempt.eventHandler.handle(
            new ContainerRequestEvent(taskAttempt.attemptId, 
                taskAttempt.resourceCapability, 
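
The net effect of this revert in TaskAttemptImpl: instead of resolving each data-local host to its rack through RackResolver, every container request now advertises the single default rack. A minimal sketch of the two code paths, assuming RackResolver.init(conf) was already called for the pre-revert branch; the class and method names below are illustrative, not part of the patch:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.yarn.util.RackResolver;

// Illustrative helper contrasting the reverted behavior with the new one.
public class RackHintsSketch {

  // Pre-revert: each data-local host is resolved to its rack, so the
  // scheduler can fall back from node locality to rack locality.
  // Requires RackResolver.init(conf) to have been called first.
  static String[] perHostRacks(String[] dataLocalHosts) {
    String[] racks = new String[dataLocalHosts.length];
    for (int i = 0; i < dataLocalHosts.length; i++) {
      racks[i] = RackResolver.resolve(dataLocalHosts[i]).getNetworkLocation();
    }
    return racks;
  }

  // Post-revert: one static hint, NetworkTopology.DEFAULT_RACK
  // ("/default-rack"), is attached to every request instead.
  static String[] defaultRack() {
    return new String[] {NetworkTopology.DEFAULT_RACK};
  }
}

Since every request now names the same default rack, rack-local placement carries no information for the scheduler until rack resolution is reintroduced.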

+ 3 - 3
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/RMResourceTrackerImpl.java

@@ -36,7 +36,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.yarn.Lock;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -62,7 +64,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceListener;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.service.AbstractService;
-import org.apache.hadoop.yarn.util.RackResolver;
 
 /**
  * This class is responsible for the interaction with the NodeManagers.
@@ -152,7 +153,6 @@ NodeTracker, ClusterTracker {
         this.hostsReader = null;
       }
     }
-    RackResolver.init(conf);
   }
 
   private void printConfiguredHosts() {
@@ -194,7 +194,7 @@ NodeTracker, ClusterTracker {
    */
   @Lock(Lock.NoLock.class)
   public static Node resolve(String hostName) {
-    return RackResolver.resolve(hostName);
+    return new NodeBase(hostName, NetworkTopology.DEFAULT_RACK);
   }
   
   @Lock(Lock.NoLock.class)
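
On the RM side, the reverted resolve is simple enough to check standalone: every hostname maps onto NetworkTopology.DEFAULT_RACK, giving the ResourceManager a flat, single-rack view of the cluster. A short self-contained sketch; the class name and example hostnames are hypothetical:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class DefaultRackResolveSketch {

  // Mirrors the post-revert RMResourceTrackerImpl.resolve(): no topology
  // lookup, just a NodeBase pinned to the default rack.
  public static Node resolve(String hostName) {
    return new NodeBase(hostName, NetworkTopology.DEFAULT_RACK);
  }

  public static void main(String[] args) {
    // Both hosts print "/default-rack", so the topology distance between
    // any two registered nodes is the same.
    System.out.println(resolve("host1.example.com").getNetworkLocation());
    System.out.println(resolve("host2.example.com").getNetworkLocation());
  }
}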