
Replacing NodeId integers with host:port pairs.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/MR-279@1153433 13f79535-47bb-0310-9956-ffa450edef68
Vinod Kumar Vavilapalli 14 years ago
parent
commit
43f8ce2a0a
40 changed files with 287 additions and 497 deletions
  1. 2 1
      mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  2. 0 1
      mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
  3. 0 1
      mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  4. 3 3
      mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
  5. 0 164
      mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/StaticContainerAllocator.java
  6. 1 1
      mapreduce/mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
  7. 0 2
      mapreduce/mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
  8. 0 2
      mapreduce/mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
  9. 0 2
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
  10. 7 4
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java
  11. 2 4
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
  12. 2 21
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
  13. 64 5
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
  14. 0 16
      mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
  15. 8 9
      mapreduce/yarn/yarn-api/src/main/proto/yarn_protos.proto
  16. 0 2
      mapreduce/yarn/yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
  17. 3 4
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
  18. 24 20
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
  19. 0 4
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java
  20. 0 35
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java
  21. 1 2
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/proto/yarn_server_common_protos.proto
  22. 1 2
      mapreduce/yarn/yarn-server/yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
  23. 4 5
      mapreduce/yarn/yarn-server/yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
  24. 10 10
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
  25. 0 7
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNodeRemovalListener.java
  26. 32 90
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
  27. 1 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
  28. 6 4
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
  29. 46 41
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java
  30. 3 0
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
  31. 30 9
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
  32. 9 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
  33. 1 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
  34. 1 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
  35. 1 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSApp.java
  36. 2 2
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
  37. 2 2
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
  38. 2 1
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
  39. 16 13
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
  40. 3 3
      mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java

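At the API level, this commit replaces the single-integer NodeId with a host:port pair: the NodeId record gains getHost/setHost and getPort/setPort, NodeIdPBImpl.toString() renders "host:port", and call sites that previously used getContainerManagerAddress() now read getNodeId().toString(). A minimal sketch of the new usage, mirroring the Records factory call in the NodeStatusUpdaterImpl hunk further below (host and port values here are illustrative only):

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;

public class NodeIdUsageSketch {
  public static void main(String[] args) {
    NodeId nodeId = Records.newRecord(NodeId.class);
    nodeId.setHost("nm-host.example.com"); // previously: nodeId.setId(42)
    nodeId.setPort(45454);
    // toString() now yields "host:port"; several call sites in the diffs
    // below use it where getContainerManagerAddress() used to be.
    System.out.println(nodeId); // nm-host.example.com:45454
  }
}
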
+ 2 - 1
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -1049,7 +1049,8 @@ public abstract class TaskAttemptImpl implements
       TaskAttemptContainerAssignedEvent cEvent = 
         (TaskAttemptContainerAssignedEvent) event;
       taskAttempt.containerID = cEvent.getContainer().getId();
-      taskAttempt.containerMgrAddress = cEvent.getContainer().getContainerManagerAddress();
+      taskAttempt.containerMgrAddress = cEvent.getContainer().getNodeId()
+          .toString();
       taskAttempt.nodeHttpAddress = cEvent.getContainer().getNodeHttpAddress();
       taskAttempt.containerToken = cEvent.getContainer().getContainerToken();
       taskAttempt.assignedCapability = cEvent.getContainer().getResource();

+ 0 - 1
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java

@@ -75,7 +75,6 @@ public class LocalContainerAllocator extends RMCommunicator
       Container container = recordFactory.newRecordInstance(Container.class);
       container.setId(cID);
       container.setNodeId(null);
-      container.setContainerManagerAddress("localhost");
       container.setContainerToken(null);
       container.setNodeHttpAddress("localhost:9999");
       // send the container-assigned event to task attempt

+ 0 - 1
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java

@@ -359,7 +359,6 @@ public class RecoveryService extends CompositeService implements Recovery {
       container.setId(cId);
       container.setNodeId(recordFactory
           .newRecordInstance(NodeId.class));
-      container.setContainerManagerAddress("localhost");
       container.setContainerToken(null);
       container.setNodeHttpAddress(attemptInfo.getHostname() + ":" + 
           attemptInfo.getHttpPort());

+ 3 - 3
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java

@@ -568,7 +568,7 @@ public class RMContainerAllocator extends RMContainerRequestor
           
           LOG.info("Assigned container (" + allocated + ") " +
               " to task " + assigned.attemptID +
-              " on node " + allocated.getContainerManagerAddress());
+              " on node " + allocated.getNodeId().toString());
         } else {
           //not assigned to any request, release the container
           LOG.info("Releasing unassigned container " + allocated);
@@ -653,7 +653,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       ContainerRequest assigned = null;
       while (assigned == null && maps.size() > 0
           && allocated.getResource().getMemory() >= mapResourceReqt) {
-        String host = getHost(allocated.getContainerManagerAddress());
+        String host = getHost(allocated.getNodeId().toString());
         LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
         while (list != null && list.size() > 0) {
           LOG.info("Host matched to the request list " + host);
@@ -712,7 +712,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       new HashSet<TaskAttemptId>();
     
     void add(Container container, TaskAttemptId tId) {
-      LOG.info("Assigned container " + container.getContainerManagerAddress() 
+      LOG.info("Assigned container " + container.getNodeId().toString()
           + " to " + tId);
       containerToAttemptMap.put(container.getId(), tId);
       if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {

+ 0 - 164
mapreduce/mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/StaticContainerAllocator.java

@@ -1,164 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.mapreduce.v2.app.rm;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
-import org.apache.hadoop.mapreduce.v2.app.AppContext;
-import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
-import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.service.AbstractService;
-
-/**
- * Reads the static list of NodeManager from config file and allocate 
- * containers.
- */
-public class StaticContainerAllocator extends AbstractService 
-    implements ContainerAllocator {
-
-  private static final Log LOG = 
-    LogFactory.getLog(StaticContainerAllocator.class);
-
-  private AppContext context;
-  private volatile boolean stopped;
-  private BlockingQueue<ContainerAllocatorEvent> eventQueue =
-      new LinkedBlockingQueue<ContainerAllocatorEvent>();
-  private Thread allocatorThread;
-
-  private int containerCount;
-
-  private List<String> containerMgrList;
-  private int nextIndex;
-
-  public StaticContainerAllocator(AppContext context) {
-    super("StaticContainerAllocator");
-    this.context = context;
-  }
-
-  protected List<String> getContainerMgrList(Configuration conf)
-      throws IOException {
-    Path jobSubmitDir = FileContext.getLocalFSFileContext().makeQualified(
-        new Path(new File(MRConstants.JOB_SUBMIT_DIR).getAbsolutePath()));
-    Path jobConfFile = new Path(jobSubmitDir, MRConstants.JOB_CONF_FILE);
-    conf.addResource(jobConfFile);
-    String[] containerMgrHosts = 
-      conf.getStrings(MRConstants.NM_HOSTS_CONF_KEY);
-    return Arrays.asList(containerMgrHosts);
-  }
-
-  @Override
-  public void init(Configuration conf) {
-    try {
-      containerMgrList = getContainerMgrList(conf);
-    } catch (IOException e) {
-      throw new YarnException("Cannot get container-managers list ", e);
-    }
-
-    if (containerMgrList.size() == 0) {
-      throw new YarnException("No of Container Managers are zero.");
-    }
-    super.init(conf);
-  }
-
-  @Override
-  public void start() {
-    allocatorThread = new Thread(new Allocator());
-    allocatorThread.start();
-    super.start();
-  }
-
-  @Override
-  public void stop() {
-    stopped = true;
-    allocatorThread.interrupt();
-    try {
-      allocatorThread.join();
-    } catch (InterruptedException ie) {
-      LOG.debug("Interruped Exception while stopping", ie);
-    }
-    super.stop();
-  }
-
-  @Override
-  public void handle(ContainerAllocatorEvent event) {
-    try {
-      eventQueue.put(event);
-    } catch (InterruptedException e) {
-      throw new YarnException(e);
-    }
-  }
-
-  private class Allocator implements Runnable {
-    @Override
-    public void run() {
-      ContainerAllocatorEvent event = null;
-      while (!stopped && !Thread.currentThread().isInterrupted()) {
-        try {
-          event = eventQueue.take();
-          LOG.info("Processing the event " + event.toString());
-          allocate(event);
-        } catch (InterruptedException e) {
-          return;
-        }
-      }
-    }
-
-    private void allocate(ContainerAllocatorEvent event) {
-      // allocate the container in round robin fashion on
-      // container managers
-      if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
-        if (nextIndex < containerMgrList.size()) {
-          String containerMgr = containerMgrList.get(nextIndex);
-          ContainerId containerID = generateContainerID();
-          
-          Container container = RecordFactoryProvider.getRecordFactory(null)
-              .newRecordInstance(Container.class);
-          container.setId(containerID);
-          container.setContainerManagerAddress(containerMgr);
-          container.setContainerToken(null);
-          container.setNodeHttpAddress("localhost:9999");
-          context.getEventHandler().handle(
-            new TaskAttemptContainerAssignedEvent(
-                event.getAttemptID(), container));
-        }
-      }
-    }
-
-    private ContainerId generateContainerID() {
-      ContainerId cId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ContainerId.class);
-      cId.setAppId(context.getApplicationID());
-      cId.setId(containerCount++);
-      return cId;
-    }
-  }
-}

+ 1 - 1
mapreduce/mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java

@@ -384,7 +384,7 @@ public class TypeConverter {
   private static final String TT_NAME_PREFIX = "tracker_";
   public static TaskTrackerInfo fromYarn(NodeReport node) {
     TaskTrackerInfo taskTracker = 
-      new TaskTrackerInfo(TT_NAME_PREFIX + node.getNodeAddress());
+      new TaskTrackerInfo(TT_NAME_PREFIX + node.getNodeId().toString());
     return taskTracker;
   }
 

+ 0 - 2
mapreduce/mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java

@@ -47,8 +47,6 @@ public interface MRConstants {
 
   public static final String APPS_STAGING_DIR_KEY = "yarn.apps.stagingDir";
 
-  public static final String NM_HOSTS_CONF_KEY = "NM_HOSTS";
-
   // The token file for the application. Should contain tokens for access to
   // remote file system and may optionally contain application specific tokens.
   // For now, generated by the AppManagers and used by NodeManagers and the

+ 0 - 2
mapreduce/mr-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java

@@ -61,8 +61,6 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
   @Override
   public void init(Configuration conf) {
     conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
-    conf.setStrings(MRConstants.NM_HOSTS_CONF_KEY,
-        new String[] { NMConfig.DEFAULT_NM_BIND_ADDRESS });
     conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
     conf.set(MRConstants.APPS_STAGING_DIR_KEY, new File(getTestWorkDir(),
         "apps_staging_dir/${user.name}/").getAbsolutePath());

+ 0 - 2
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java

@@ -22,7 +22,6 @@ package org.apache.hadoop.yarn.api.records;
 public interface Container extends Comparable<Container> {
   ContainerId getId();
   NodeId getNodeId();
-  String getContainerManagerAddress();
   String getNodeHttpAddress();
   Resource getResource();
   ContainerState getState();
@@ -31,7 +30,6 @@ public interface Container extends Comparable<Container> {
   
   void setId(ContainerId id);
   void setNodeId(NodeId nodeId);
-  void setContainerManagerAddress(String containerManagerAddress);
   void setNodeHttpAddress(String nodeHttpAddress);
   void setResource(Resource resource);
   void setState(ContainerState state);

+ 7 - 4
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java

@@ -1,7 +1,10 @@
 package org.apache.hadoop.yarn.api.records;
 
-public interface NodeId {
-  public abstract int getId();
-  
-  public abstract void setId(int id);
+public interface NodeId extends Comparable<NodeId> {
+
+  String getHost();
+  void setHost(String host);
+
+  int getPort();
+  void setPort(int port);
 }

+ 2 - 4
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java

@@ -1,8 +1,8 @@
 package org.apache.hadoop.yarn.api.records;
 
 public interface NodeReport {
-  String getNodeAddress();
-  void setNodeAddress(String nodeAddress);
+  NodeId getNodeId();
+  void setNodeId(NodeId nodeId);
   String getHttpAddress();
   void setHttpAddress(String httpAddress);
   String getRackName();
@@ -13,8 +13,6 @@ public interface NodeReport {
   void setCapability(Resource capability);
   int getNumContainers();
   void setNumContainers(int numContainers);
-  NodeId getNodeId();
-  void setNodeId(NodeId nodeId);
   NodeHealthStatus getNodeHealthStatus();
   void setNodeHealthStatus(NodeHealthStatus nodeHealthStatus);
 }

+ 2 - 21
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java

@@ -172,25 +172,6 @@ public class ContainerPBImpl extends ProtoBase<ContainerProto> implements Contai
     this.containerId = id;
   }
 
-  @Override
-  public String getContainerManagerAddress() {
-    ContainerProtoOrBuilder p = viaProto ? proto : builder;
-    if (!p.hasContainerManagerAddress()) {
-      return null;
-    }
-    return (p.getContainerManagerAddress());
-  }
-
-  @Override
-  public void setContainerManagerAddress(String containerManagerAddress) {
-    maybeInitBuilder();
-    if (containerManagerAddress == null) {
-      builder.clearContainerManagerAddress();
-      return;
-    }
-    builder.setContainerManagerAddress((containerManagerAddress));
-  }
-
   @Override
   public String getNodeHttpAddress() {
     ContainerProtoOrBuilder p = viaProto ? proto : builder;
@@ -324,7 +305,7 @@ public class ContainerPBImpl extends ProtoBase<ContainerProto> implements Contai
   @Override
   public int compareTo(Container other) {
     if (this.getId().compareTo(other.getId()) == 0) {
-      if (this.getContainerManagerAddress().compareTo(other.getContainerManagerAddress()) == 0) {
+      if (this.getNodeId().compareTo(other.getNodeId()) == 0) {
         if (this.getResource().compareTo(other.getResource()) == 0) {
           if (this.getState().compareTo(other.getState()) == 0) {
             //ContainerToken
@@ -336,7 +317,7 @@ public class ContainerPBImpl extends ProtoBase<ContainerProto> implements Contai
           return this.getResource().compareTo(other.getResource());
         }
       } else {
-        return this.getContainerManagerAddress().compareTo(other.getContainerManagerAddress());
+        return this.getNodeId().compareTo(other.getNodeId());
       }
     } else {
       return this.getId().compareTo(other.getId());

+ 64 - 5
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java

@@ -35,19 +35,78 @@ public class NodeIdPBImpl extends ProtoBase<NodeIdProto> implements NodeId {
     viaProto = false;
   }
     
-  
   @Override
-  public int getId() {
+  public String getHost() {
+    NodeIdProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getHost());
+  }
+
+  @Override
+  public void setHost(String host) {
+    maybeInitBuilder();
+    builder.setHost((host));
+  }
+
+  @Override
+  public int getPort() {
     NodeIdProtoOrBuilder p = viaProto ? proto : builder;
-    return (p.getId());
+    return (p.getPort());
   }
 
   @Override
-  public void setId(int id) {
+  public void setPort(int port) {
     maybeInitBuilder();
-    builder.setId((id));
+    builder.setPort((port));
+  }
+
+  @Override
+  public String toString() {
+    return this.getHost() + ":" + this.getPort();
+  }
+  
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = super.hashCode();
+    String host = this.getHost();
+    result = prime * result + ((host == null) ? 0 : host.hashCode());
+    result = prime * result + this.getPort();
+    return result;
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (!super.equals(obj))
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    NodeIdPBImpl other = (NodeIdPBImpl) obj;
+    String host = this.getHost();
+    String otherHost = other.getHost();
+    if (host == null) {
+      if (otherHost != null)
+        return false;
+    } else if (!host.equals(otherHost))
+      return false;
+    if (this.getPort() != other.getPort())
+      return false;
+    return true;
+  }
 
+  @Override
+  public int compareTo(NodeId other) {
+    int hostCompare = this.getHost().compareTo(other.getHost());
+    if (hostCompare == 0) {
+      if (this.getPort() > other.getPort()) {
+        return 1;
+      } else if (this.getPort() < other.getPort()) {
+        return -1;
+      }
+      return 0;
+    }
+    return hostCompare;
+  }
 
 }  

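Since NodeId now implements Comparable and serves as the key of the RM's node map (see the ResourceTrackerService changes below), ordering and equality are derived from the host/port pair: compareTo orders by host first, then port, and equals/hashCode include both fields. A small illustration, under the assumption that the record factory returns a NodeIdPBImpl as it does elsewhere in this patch:

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;

public class NodeIdCompareSketch {
  static NodeId nodeId(String host, int port) {
    NodeId id = Records.newRecord(NodeId.class);
    id.setHost(host);
    id.setPort(port);
    return id;
  }

  public static void main(String[] args) {
    NodeId a = nodeId("host1", 45454);
    NodeId b = nodeId("host1", 45455);
    System.out.println(a.compareTo(b) < 0);               // true: same host, lower port first
    System.out.println(a.equals(nodeId("host1", 45454))); // true: same host and port
  }
}
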
+ 0 - 16
mapreduce/yarn/yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java

@@ -59,12 +59,6 @@ public class NodeReportPBImpl extends ProtoBase<NodeReportProto>
     return this.nodeHealthStatus;
   }
 
-  @Override
-  public String getNodeAddress() {
-    NodeReportProtoOrBuilder p = viaProto ? proto : builder;
-    return (p.hasNodeAddress()) ? p.getNodeAddress() : null;
-  }
-
   @Override
   public String getHttpAddress() {
     NodeReportProtoOrBuilder p = viaProto ? proto : builder;
@@ -135,16 +129,6 @@ public class NodeReportPBImpl extends ProtoBase<NodeReportProto>
     this.nodeHealthStatus = healthStatus;
   }
 
-  @Override
-  public void setNodeAddress(String nodeAddress) {
-    maybeInitBuilder();
-    if (nodeAddress == null) {
-      builder.clearNodeAddress();
-      return;
-    }
-    builder.setNodeAddress(nodeAddress);
-  }
-
   @Override
   public void setHttpAddress(String httpAddress) {
     maybeInitBuilder();

+ 8 - 9
mapreduce/yarn/yarn-api/src/main/proto/yarn_protos.proto

@@ -45,12 +45,11 @@ message ContainerTokenProto {
 message ContainerProto {
   optional ContainerIdProto id = 1;
   optional NodeIdProto nodeId = 2;
-  optional string container_manager_address = 3;
-  optional string node_http_address = 4;
-  optional ResourceProto resource = 5;
-  optional ContainerStateProto state = 6;
-  optional ContainerTokenProto container_token = 7;
-  optional ContainerStatusProto container_status = 8;
+  optional string node_http_address = 3;
+  optional ResourceProto resource = 4;
+  optional ContainerStateProto state = 5;
+  optional ContainerTokenProto container_token = 6;
+  optional ContainerStatusProto container_status = 7;
 }
 
 enum ApplicationStateProto {
@@ -122,7 +121,8 @@ message ApplicationReportProto {
 }
 
 message NodeIdProto {
-  optional int32 id = 1;
+  optional string host = 1;
+  optional int32 port = 2;
 }
 
 message NodeHealthStatusProto {
@@ -132,13 +132,12 @@ message NodeHealthStatusProto {
 }
 
 message NodeReportProto {
-  optional string nodeAddress = 1;
+  optional NodeIdProto nodeId = 1;
   optional string httpAddress = 2;
   optional string rackName = 3;
   optional ResourceProto used = 4;
   optional ResourceProto capability = 5;
   optional int32 numContainers = 6;
-  optional NodeIdProto nodeId = 7;
   optional NodeHealthStatusProto node_health_status = 8;
 }
 

+ 0 - 2
mapreduce/yarn/yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java

@@ -145,7 +145,6 @@ public class BuilderUtils {
     container.setId(c.getId());
     container.setContainerToken(c.getContainerToken());
     container.setNodeId(c.getNodeId());
-    container.setContainerManagerAddress(c.getContainerManagerAddress());
     container.setNodeHttpAddress(c.getNodeHttpAddress());
     container.setResource(c.getResource());
     container.setState(c.getState());
@@ -168,7 +167,6 @@ public class BuilderUtils {
     Container container = recordFactory.newRecordInstance(Container.class);
     container.setId(containerId);
     container.setNodeId(nodeId);
-    container.setContainerManagerAddress(containerManagerAddress);
     container.setNodeHttpAddress(nodeHttpAddress);
     container.setResource(resource);
     container.setState(ContainerState.INITIALIZING);

+ 3 - 4
mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java

@@ -18,16 +18,15 @@
 
 package org.apache.hadoop.yarn.server.api.protocolrecords;
 
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 
 public interface RegisterNodeManagerRequest {
-  String getHost();
-  int getContainerManagerPort();
+  NodeId getNodeId();
   int getHttpPort();
   Resource getResource();
   
-  void setHost(String host);
-  void setContainerManagerPort(int port);
+  void setNodeId(NodeId nodeId);
   void setHttpPort(int port);
   void setResource(Resource resource);
 }

+ 24 - 20
mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java

@@ -1,9 +1,12 @@
 package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
 
 
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProtoOrBuilder;
@@ -17,7 +20,7 @@ public class RegisterNodeManagerRequestPBImpl extends ProtoBase<RegisterNodeMana
   boolean viaProto = false;
   
   private Resource resource = null;
-  
+  private NodeId nodeId = null;
   
   public RegisterNodeManagerRequestPBImpl() {
     builder = RegisterNodeManagerRequestProto.newBuilder();
@@ -39,6 +42,10 @@ public class RegisterNodeManagerRequestPBImpl extends ProtoBase<RegisterNodeMana
     if (this.resource != null) {
       builder.setResource(convertToProtoFormat(this.resource));
     }
+    if (this.nodeId != null) {
+      builder.setNodeId(convertToProtoFormat(this.nodeId));
+    }
+
   }
 
   private void mergeLocalToProto() {
@@ -79,18 +86,24 @@ public class RegisterNodeManagerRequestPBImpl extends ProtoBase<RegisterNodeMana
   }
 
   @Override
-  public int getContainerManagerPort() {
+  public NodeId getNodeId() {
     RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
-    if (!p.hasContainerManagerPort()) {
-      return 0;
+    if (this.nodeId != null) {
+      return this.nodeId;
+    }
+    if (!p.hasNodeId()) {
+      return null;
     }
-    return (p.getContainerManagerPort());
+    this.nodeId = convertFromProtoFormat(p.getNodeId());
+    return this.nodeId;
   }
 
   @Override
-  public void setContainerManagerPort(int port) {
+  public void setNodeId(NodeId nodeId) {
     maybeInitBuilder();
-    builder.setContainerManagerPort(port);
+    if (nodeId == null) 
+      builder.clearNodeId();
+    this.nodeId = nodeId;
   }
 
   @Override
@@ -108,21 +121,12 @@ public class RegisterNodeManagerRequestPBImpl extends ProtoBase<RegisterNodeMana
     builder.setHttpPort(httpPort);
   }
 
-  @Override
-  public String getHost() {
-    RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
-    if (!p.hasHost()) {
-      return null;
-    }
-    return (p.getHost());
+  private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
+    return new NodeIdPBImpl(p);
   }
 
-  @Override
-  public void setHost(String host) {
-    maybeInitBuilder();
-    if (host == null) 
-      builder.clearHost();
-    builder.setHost((host));
+  private NodeIdProto convertToProtoFormat(NodeId t) {
+    return ((NodeIdPBImpl)t).getProto();
   }
 
   private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {

+ 0 - 4
mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/RegistrationResponse.java

@@ -19,12 +19,8 @@ package org.apache.hadoop.yarn.server.api.records;
 
 import java.nio.ByteBuffer;
 
-import org.apache.hadoop.yarn.api.records.NodeId;
-
 public interface RegistrationResponse {
-  public abstract NodeId getNodeId();
   public abstract ByteBuffer getSecretKey();
   
-  public abstract void setNodeId(NodeId nodeId);
   public abstract void setSecretKey(ByteBuffer secretKey);
 }

+ 0 - 35
mapreduce/yarn/yarn-server/yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/RegistrationResponsePBImpl.java

@@ -18,11 +18,8 @@ public class RegistrationResponsePBImpl extends ProtoBase<RegistrationResponsePr
   RegistrationResponseProto.Builder builder = null;
   boolean viaProto = false;
   
-  private NodeId nodeId = null;
   private ByteBuffer secretKey = null;
   
-  
-  
   public RegistrationResponsePBImpl() {
     builder = RegistrationResponseProto.newBuilder();
   }
@@ -41,9 +38,6 @@ public class RegistrationResponsePBImpl extends ProtoBase<RegistrationResponsePr
   }
 
   private void mergeLocalToBuilder() {
-    if (this.nodeId != null) {
-      builder.setNodeId(convertToProtoFormat(this.nodeId));
-    }
     if (this.secretKey != null) {
       builder.setSecretKey(convertToProtoFormat(this.secretKey));
     }
@@ -64,28 +58,7 @@ public class RegistrationResponsePBImpl extends ProtoBase<RegistrationResponsePr
     }
     viaProto = false;
   }
-    
-  
-  @Override
-  public NodeId getNodeId() {
-    RegistrationResponseProtoOrBuilder p = viaProto ? proto : builder;
-    if (this.nodeId != null) {
-      return this.nodeId;
-    }
-    if (!p.hasNodeId()) {
-      return null;
-    }
-    this.nodeId = convertFromProtoFormat(p.getNodeId());
-    return this.nodeId;
-  }
 
-  @Override
-  public void setNodeId(NodeId nodeId) {
-    maybeInitBuilder();
-    if (nodeId == null) 
-      builder.clearNodeId();
-    this.nodeId = nodeId;
-  }
   @Override
   public ByteBuffer getSecretKey() {
     RegistrationResponseProtoOrBuilder p = viaProto ? proto : builder;
@@ -107,12 +80,4 @@ public class RegistrationResponsePBImpl extends ProtoBase<RegistrationResponsePr
     this.secretKey = secretKey;
   }
 
-  private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
-    return new NodeIdPBImpl(p);
-  }
-
-  private NodeIdProto convertToProtoFormat(NodeId t) {
-    return ((NodeIdPBImpl)t).getProto();
-  }
-
 }  

+ 1 - 2
mapreduce/yarn/yarn-server/yarn-server-common/src/main/proto/yarn_server_common_protos.proto

@@ -13,8 +13,7 @@ message NodeStatusProto {
 }
 
 message RegistrationResponseProto {
-  optional NodeIdProto node_id = 1;
-  optional bytes secret_key = 2;
+  optional bytes secret_key = 1;
 }
 
 message HeartbeatResponseProto {

+ 1 - 2
mapreduce/yarn/yarn-server/yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto

@@ -7,8 +7,7 @@ import "yarn_protos.proto";
 import "yarn_server_common_protos.proto";
 
 message RegisterNodeManagerRequestProto {
-  optional string host = 1;
-  optional int32 container_manager_port = 2;
+  optional NodeIdProto node_id = 1;
   optional int32 http_port = 3;
   optional ResourceProto resource = 4;
 }

+ 4 - 5
mapreduce/yarn/yarn-server/yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java

@@ -23,7 +23,6 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.avro.AvroRuntimeException;
@@ -58,6 +57,7 @@ import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.Records;
 
 public class NodeStatusUpdaterImpl extends AbstractService implements
     NodeStatusUpdater {
@@ -164,13 +164,13 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     LOG.info("Connected to ResourceManager at " + this.rmAddress);
     
     RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
-    request.setHost(this.hostName);
-    request.setContainerManagerPort(this.containerManagerPort);
+    this.nodeId = Records.newRecord(NodeId.class);
+    this.nodeId.setHost(this.hostName);
+    this.nodeId.setPort(this.containerManagerPort);
     request.setHttpPort(this.httpPort);
     request.setResource(this.totalResource);
     RegistrationResponse regResponse =
         this.resourceTracker.registerNodeManager(request).getRegistrationResponse();
-    this.nodeId = regResponse.getNodeId();
     if (UserGroupInformation.isSecurityEnabled()) {
       this.secretKeyBytes = regResponse.getSecretKey().array();
     }
@@ -213,7 +213,6 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         // Clone the container to send it to the RM
         org.apache.hadoop.yarn.api.records.Container c = container.cloneAndGetContainer();
         c.setNodeId(this.nodeId);
-        c.setContainerManagerAddress(this.containerManagerBindAddress);
         c.setNodeHttpAddress(this.nodeHttpAddress); // TODO: don't set everytime.
         applicationContainers.add(c);
         ++numActiveContainers;

+ 10 - 10
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java

@@ -351,23 +351,23 @@ public class ClientRMService extends AbstractService implements
     return response;
   }
 
-  private NodeReport createNodeReports(RMNode nodeInfo) {
-    NodeReport node = 
+  private NodeReport createNodeReports(RMNode rmNode) {
+    NodeReport report = 
       recordFactory.newRecordInstance(NodeReport.class);
-    node.setNodeAddress(nodeInfo.getNodeAddress());
-    node.setRackName(nodeInfo.getRackName());
-    node.setCapability(nodeInfo.getTotalCapability());
-    node.setNodeHealthStatus(nodeInfo.getNodeHealthStatus());
-    List<Container> containers = nodeInfo.getRunningContainers();
+    report.setNodeId(rmNode.getNodeID());
+    report.setRackName(rmNode.getRackName());
+    report.setCapability(rmNode.getTotalCapability());
+    report.setNodeHealthStatus(rmNode.getNodeHealthStatus());
+    List<Container> containers = rmNode.getRunningContainers();
     int userdResource = 0;
     for (Container c : containers) {
       userdResource += c.getResource().getMemory();
     }
     Resource usedRsrc = recordFactory.newRecordInstance(Resource.class);
     usedRsrc.setMemory(userdResource);
-    node.setUsed(usedRsrc);
-    node.setNumContainers(nodeInfo.getNumContainers());
-    return node;
+    report.setUsed(usedRsrc);
+    report.setNumContainers(rmNode.getNumContainers());
+    return report;
   }
 
   @Override

+ 0 - 7
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNodeRemovalListener.java

@@ -1,7 +0,0 @@
-package org.apache.hadoop.yarn.server.resourcemanager;
-
-import org.apache.hadoop.yarn.api.records.NodeId;
-
-public interface RMNodeRemovalListener {
-  void RMNodeRemoved(NodeId nodeId);
-}

+ 32 - 90
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java

@@ -64,7 +64,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.util.RackResolver;
 
 public class ResourceTrackerService extends AbstractService implements
-    ResourceTracker, RMNodeRemovalListener {
+    ResourceTracker {
 
   private static final Log LOG = LogFactory.getLog(ResourceTrackerService.class);
 
@@ -76,13 +76,6 @@ public class ResourceTrackerService extends AbstractService implements
   private final NMLivelinessMonitor nmLivelinessMonitor;
   private final ContainerTokenSecretManager containerTokenSecretManager;
 
-  /* we dont garbage collect on nodes. A node can come back up again and re register,
-   * so no use garbage collecting. Though admin can break the RM by bouncing 
-   * nodemanagers on different ports again and again.
-   */
-  private Map<String, NodeId> nodes = new ConcurrentHashMap<String, NodeId>();
-  private final AtomicInteger nodeCounter = new AtomicInteger(0);
-
   private Server server;
   private InetSocketAddress resourceTrackerAddress;
 
@@ -95,9 +88,6 @@ public class ResourceTrackerService extends AbstractService implements
     reboot.setHeartbeatResponse(rebootResp);
   }
 
-  private final ConcurrentMap<NodeId, HeartbeatResponse> lastHeartBeats
-    = new ConcurrentHashMap<NodeId, HeartbeatResponse>();
-
   public ResourceTrackerService(RMContext rmContext,
       NodesListManager nodesListManager,
       NMLivelinessMonitor nmLivelinessMonitor,
@@ -151,8 +141,9 @@ public class ResourceTrackerService extends AbstractService implements
   public RegisterNodeManagerResponse registerNodeManager(
       RegisterNodeManagerRequest request) throws YarnRemoteException {
 
-    String host = request.getHost();
-    int cmPort = request.getContainerManagerPort();
+    NodeId nodeId = request.getNodeId();
+    String host = nodeId.getHost();
+    int cmPort = nodeId.getPort();
     int httpPort = request.getHttpPort();
     Resource capability = request.getResource();
 
@@ -163,23 +154,24 @@ public class ResourceTrackerService extends AbstractService implements
         throw new IOException("Disallowed NodeManager from  " + host); 
       }
 
-      String node = host + ":" + cmPort;
-      NodeId nodeId = mayBeCreateAndGetNodeId(node);
-   
-      createNewNode(nodeId, host, cmPort, httpPort, resolve(host), capability);
+      RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort,
+          httpPort, resolve(host), capability);
+
+      if (this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode) != null) {
+        throw new IOException("Duplicate registration from the node!");
+      }
 
       this.nmLivelinessMonitor.register(nodeId);
 
       LOG.info("NodeManager from node " + host + 
           "(cmPort: " + cmPort + " httpPort: " + httpPort + ") "
           + "registered with capability: " + capability.getMemory()
-          + ", assigned nodeId " + nodeId.getId());
+          + ", assigned nodeId " + nodeId);
 
       RegistrationResponse regResponse = recordFactory.newRecordInstance(
           RegistrationResponse.class);
-      regResponse.setNodeId(nodeId);
-      SecretKey secretKey =
-        this.containerTokenSecretManager.createAndGetSecretKey(node);
+      SecretKey secretKey = this.containerTokenSecretManager
+          .createAndGetSecretKey(nodeId.toString());
       regResponse.setSecretKey(ByteBuffer.wrap(secretKey.getEncoded()));
 
       RegisterNodeManagerResponse response = recordFactory
@@ -187,7 +179,7 @@ public class ResourceTrackerService extends AbstractService implements
       response.setRegistrationResponse(regResponse);
       return response;
     } catch (IOException ioe) {
-      LOG.info("Exception in node registration from " + request.getHost(), ioe);
+      LOG.info("Exception in node registration from " + nodeId.getHost(), ioe);
       throw RPCUtil.getRemoteException(ioe);
     }
   }
@@ -231,17 +223,18 @@ public class ResourceTrackerService extends AbstractService implements
           .newRecordInstance(NodeHeartbeatResponse.class);
 
       // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
-      if (remoteNodeStatus.getResponseId() + 1 == this.lastHeartBeats.get(nodeId)
+      HeartbeatResponse lastHeartbeatResponse = rmNode
+          .getLastHeartBeatResponse();
+      if (remoteNodeStatus.getResponseId() + 1 == lastHeartbeatResponse
            .getResponseId()) {
         LOG.info("Received duplicate heartbeat from node " + 
             rmNode.getNodeAddress());
-        nodeHeartBeatResponse.setHeartbeatResponse(this.lastHeartBeats
-            .get(nodeId));
+        nodeHeartBeatResponse.setHeartbeatResponse(lastHeartbeatResponse);
         return nodeHeartBeatResponse;
-      } else if (remoteNodeStatus.getResponseId() + 1 < this.lastHeartBeats
-          .get(nodeId).getResponseId()) {
+      } else if (remoteNodeStatus.getResponseId() + 1 < lastHeartbeatResponse
+          .getResponseId()) {
         LOG.info("Too far behind rm response id:" +
-            this.lastHeartBeats.get(nodeId).getResponseId() + " nm response id:"
+            lastHeartbeatResponse.getResponseId() + " nm response id:"
             + remoteNodeStatus.getResponseId());
         // TODO: Just sending reboot is not enough. Think more.
         this.rmContext.getDispatcher().getEventHandler().handle(
@@ -249,22 +242,20 @@ public class ResourceTrackerService extends AbstractService implements
         return reboot;
       }
 
-      // 4. Send status to RMNode
+      // Heartbeat response
+      HeartbeatResponse latestResponse = recordFactory
+          .newRecordInstance(HeartbeatResponse.class);
+      latestResponse
+          .setResponseId(lastHeartbeatResponse.getResponseId() + 1);
+      latestResponse.addAllContainersToCleanup(rmNode.pullContainersToCleanUp());
+      latestResponse.addAllApplicationsToCleanup(rmNode.pullAppsToCleanup());
+
+      // 4. Send status to RMNode, saving the latest response.
       this.rmContext.getDispatcher().getEventHandler().handle(
           new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(),
-              remoteNodeStatus.getAllContainers()));
+              remoteNodeStatus.getAllContainers(), latestResponse));
 
-      // Heartbeat response
-      HeartbeatResponse response = recordFactory
-          .newRecordInstance(HeartbeatResponse.class);
-      response
-          .setResponseId(this.lastHeartBeats.get(nodeId).getResponseId() + 1);
-      response.addAllContainersToCleanup(rmNode.pullContainersToCleanUp());
-      response.addAllApplicationsToCleanup(rmNode.pullAppsToCleanup());
-
-      // Save the response
-      this.lastHeartBeats.put(nodeId, response);
-      nodeHeartBeatResponse.setHeartbeatResponse(response);
+      nodeHeartBeatResponse.setHeartbeatResponse(latestResponse);
       return nodeHeartBeatResponse;
     } catch (IOException ioe) {
       LOG.info("Exception in heartbeat from node " + 
@@ -295,55 +286,6 @@ public class ResourceTrackerService extends AbstractService implements
 //    }
   }
 
-  private void createNewNode(NodeId nodeId, String hostName, int cmPort,
-      int httpPort, Node node, Resource capability) throws IOException {
-
-    RMNode rmNode = new RMNodeImpl(nodeId, rmContext, hostName, cmPort,
-        httpPort, node, capability, this);
-
-    if (this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode) != null) {
-      throw new IOException("Duplicate registration from the node!");
-    }
-
-    // Record the new node
-    synchronized (nodes) {
-      LOG.info("DEBUG -- Adding  " + hostName);
-      HeartbeatResponse response = recordFactory
-          .newRecordInstance(HeartbeatResponse.class);
-      response.setResponseId(0);
-      this.lastHeartBeats.put(nodeId, response);
-      nodes.put(rmNode.getNodeAddress(), nodeId);
-    }
-  }
-
-  @Override
-  public void RMNodeRemoved(NodeId nodeId) {
-    RMNode node = null;  
-    synchronized (nodes) {
-      node = this.rmContext.getRMNodes().get(nodeId);
-      if (node != null) {
-        nodes.remove(node.getNodeAddress());
-        this.lastHeartBeats.remove(nodeId);
-      } else {
-        LOG.warn("Unknown node " + nodeId + " unregistered");
-      }
-    }
-    
-    if (node != null) {
-      this.rmContext.getRMNodes().remove(nodeId);
-    }
-  }
-  
-  private  NodeId mayBeCreateAndGetNodeId(String node) {
-    NodeId nodeId;
-    nodeId = nodes.get(node);
-    if (nodeId == null) {
-      nodeId = recordFactory.newRecordInstance(NodeId.class);
-      nodeId.setId(nodeCounter.getAndIncrement());
-    }
-    return nodeId;
-  }
-
   /**
    * resolving the network topology.
    * @param hostName the hostname of this node.

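With the service-level lastHeartBeats map removed, the previous heartbeat response is now read from the RMNode itself, while the ordering rule stays the same. A self-contained sketch of that rule (class and method names here are hypothetical; only the comparison logic is taken from the hunk above):

public class HeartbeatOrderingSketch {
  enum Action { DUPLICATE, REBOOT, FRESH }

  // remoteResponseId comes from the node's status report,
  // lastStoredResponseId from rmNode.getLastHeartBeatResponse().
  static Action classify(int remoteResponseId, int lastStoredResponseId) {
    if (remoteResponseId + 1 == lastStoredResponseId) {
      return Action.DUPLICATE; // replay the stored response
    } else if (remoteResponseId + 1 < lastStoredResponseId) {
      return Action.REBOOT;    // node too far behind the RM
    }
    return Action.FRESH;       // build a new response with id = last + 1
  }

  public static void main(String[] args) {
    System.out.println(classify(4, 5)); // DUPLICATE
    System.out.println(classify(2, 5)); // REBOOT
    System.out.println(classify(5, 5)); // FRESH
  }
}
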
+ 1 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java

@@ -141,7 +141,7 @@ public class AMLauncher implements Runnable {
 
     Container container = application.getMasterContainer();
 
-    final String containerManagerBindAddress = container.getContainerManagerAddress();
+    final String containerManagerBindAddress = container.getNodeId().toString();
 
     final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.
 

+ 6 - 4
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java

@@ -39,7 +39,8 @@ public class MemStore implements Store {
 
   public MemStore() {
     nodeId = recordFactory.newRecordInstance(NodeId.class);
-    nodeId.setId(-1);
+    nodeId.setHost("TODO");
+    nodeId.setPort(-1);
   }
 
   @Override
@@ -87,9 +88,10 @@ public class MemStore implements Store {
 
   @Override
   public synchronized NodeId getNextNodeId() throws IOException {
-    int num = nodeId.getId();
-    num++;
-    nodeId.setId(num);
+    // TODO: FIXMEVinodkv
+//    int num = nodeId.getId();
+//    num++;
+//    nodeId.setId(num);
     return nodeId;
   }
 

+ 46 - 41
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java

@@ -98,22 +98,23 @@ public class ZKStore implements Store {
         this.ZK_TIMEOUT,
         createZKWatcher() 
     );
-    this.nodeId.setId(0);
+    // TODO: FIXMEVinodkv
+//    this.nodeId.setId(0);
   }
 
   protected Watcher createZKWatcher() {
     return new ZKWatcher();   
   }
 
-  private NodeReportPBImpl createNodeManagerInfo(RMNode nodeInfo) {
+  private NodeReportPBImpl createNodeManagerInfo(RMNode rmNode) {
     NodeReport node = 
       recordFactory.newRecordInstance(NodeReport.class);
-    node.setNodeAddress(nodeInfo.getNodeAddress());
-    node.setRackName(nodeInfo.getRackName());
-    node.setCapability(nodeInfo.getTotalCapability());
+    node.setNodeId(rmNode.getNodeID());
+    node.setRackName(rmNode.getRackName());
+    node.setCapability(rmNode.getTotalCapability());
     // TODO: FIXME
 //    node.setUsed(nodeInfo.getUsedResource());
-    node.setNumContainers(nodeInfo.getNumContainers());
+    node.setNumContainers(rmNode.getNumContainers());
     return (NodeReportPBImpl)node;
   }
 
@@ -123,32 +124,34 @@ public class ZKStore implements Store {
     if (!doneWithRecovery) return;
     NodeReportPBImpl nodeManagerInfo = createNodeManagerInfo(node);
     byte[] bytes = nodeManagerInfo.getProto().toByteArray();
-    try {
-      zkClient.create(NODES + Integer.toString(node.getNodeID().getId()), bytes, null,
-          CreateMode.PERSISTENT);
-    } catch(InterruptedException ie) {
-      LOG.info("Interrupted", ie);
-      throw new InterruptedIOException("Interrupted");
-    } catch(KeeperException ke) {
-      LOG.info("Keeper exception", ke);
-      throw convertToIOException(ke);
-    }
+    // TODO: FIXMEVinodkv
+//    try {
+//      zkClient.create(NODES + Integer.toString(node.getNodeID().getId()), bytes, null,
+//          CreateMode.PERSISTENT);
+//    } catch(InterruptedException ie) {
+//      LOG.info("Interrupted", ie);
+//      throw new InterruptedIOException("Interrupted");
+//    } catch(KeeperException ke) {
+//      LOG.info("Keeper exception", ke);
+//      throw convertToIOException(ke);
+//    }
   }
 
   @Override
   public synchronized void removeNode(RMNode node) throws IOException {
     if (!doneWithRecovery) return;
     
-    /** remove a storage node **/
-    try {
-      zkClient.delete(NODES + Integer.toString(node.getNodeID().getId()), -1);
-    } catch(InterruptedException ie) {
-      LOG.info("Interrupted", ie);
-      throw new InterruptedIOException("Interrupted");
-    } catch(KeeperException ke) {
-      LOG.info("Keeper exception", ke);
-      throw convertToIOException(ke);
-    }
+//    TODO: FIXME VINODKV
+//    /** remove a storage node **/
+//    try {
+//      zkClient.delete(NODES + Integer.toString(node.getNodeID().getId()), -1);
+//    } catch(InterruptedException ie) {
+//      LOG.info("Interrupted", ie);
+//      throw new InterruptedIOException("Interrupted");
+//    } catch(KeeperException ke) {
+//      LOG.info("Keeper exception", ke);
+//      throw convertToIOException(ke);
+//    }
 
   }
 
@@ -160,17 +163,18 @@ public class ZKStore implements Store {
 
   @Override
   public synchronized NodeId getNextNodeId() throws IOException {
-    int num = nodeId.getId();
-    num++;
-    nodeId.setId(num);
-    try {
-      zkClient.setData(NODES + NODE_ID, nodeId.getProto().toByteArray() , -1);
-    } catch(InterruptedException ie) {
-      LOG.info("Interrupted", ie);
-      throw new InterruptedIOException(ie.getMessage());
-    } catch(KeeperException ke) {
-      throw convertToIOException(ke);
-    }
+//    TODO: FIXME VINODKV
+//    int num = nodeId.getId();
+//    num++;
+//    nodeId.setId(num);
+//    try {
+//      zkClient.setData(NODES + NODE_ID, nodeId.getProto().toByteArray() , -1);
+//    } catch(InterruptedException ie) {
+//      LOG.info("Interrupted", ie);
+//      throw new InterruptedIOException(ie.getMessage());
+//    } catch(KeeperException ke) {
+//      throw convertToIOException(ke);
+//    }
     return nodeId;
   }
 
@@ -458,9 +462,10 @@ public class ZKStore implements Store {
       final Pattern trackerPattern = Pattern.compile(".*:.*");
       final Matcher m = trackerPattern.matcher("");
       for (NodeReport node: nodeInfos) {
-        m.reset(node.getNodeAddress());
+        m.reset(node.getNodeId().getHost());
         if (!m.find()) {
-          LOG.info("Skipping node, bad node-address " + node.getNodeAddress());
+          LOG.info("Skipping node, bad node-address "
+              + node.getNodeId().getHost());
           continue;
         }
         String hostName = m.group(0);
@@ -473,8 +478,8 @@ public class ZKStore implements Store {
         int httpPort = Integer.valueOf(m.group(1));
         RMNode nm = new RMNodeImpl(node.getNodeId(), null,
             hostName, cmPort, httpPort,
-            ResourceTrackerService.resolve(node.getNodeAddress()), 
-            node.getCapability(), null);
+            ResourceTrackerService.resolve(node.getNodeId().getHost()), 
+            node.getCapability());
         nodeManagers.add(nm);
       }
       readLastNodeId();

+ 3 - 0
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 
 /**
  * Node managers information on available resources 
@@ -115,4 +116,6 @@ public interface RMNode {
   public List<ContainerId> pullContainersToCleanUp();
 
   public List<ApplicationId> pullAppsToCleanup();
+
+  public HeartbeatResponse getLastHeartBeatResponse();
 }

+ 30 - 9
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java

@@ -44,8 +44,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.RMNodeRemovalListener;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
@@ -84,8 +84,6 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   private final String nodeAddress; // The containerManager address
   private final String httpAddress;
   private final Resource totalCapability;
-  private final RMNodeRemovalListener nodeRemovalListener;
-  private Resource availableResource = recordFactory.newRecordInstance(Resource.class);
   private final Node node;
   private final NodeHealthStatus nodeHealthStatus = recordFactory
       .newRecordInstance(NodeHealthStatus.class);
@@ -101,6 +99,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   /* the list of applications that have finished and need to be purged */
   private final List<ApplicationId> finishedApplications = new ArrayList<ApplicationId>();
 
+  private HeartbeatResponse latestHeartBeatResponse = recordFactory
+      .newRecordInstance(HeartbeatResponse.class);
+
   private static final StateMachineFactory<RMNodeImpl,
                                            RMNodeState,
                                            RMNodeEventType,
@@ -137,8 +138,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
                              RMNodeEvent> stateMachine;
 
   public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
-      int cmPort, int httpPort, Node node, Resource capability,
-      RMNodeRemovalListener nodeRemovalListener) {
+      int cmPort, int httpPort, Node node, Resource capability) {
     this.nodeId = nodeId;
     this.context = context;
     this.hostName = hostName;
@@ -146,13 +146,13 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     this.httpPort = httpPort;
     this.totalCapability = capability; 
     this.nodeAddress = hostName + ":" + cmPort;
-    this.httpAddress = hostName + ":" + httpPort;
-    Resources.addTo(availableResource, capability);
-    this.nodeRemovalListener = nodeRemovalListener;
+    this.httpAddress = hostName + ":" + httpPort;;
     this.node = node;
     this.nodeHealthStatus.setIsNodeHealthy(true);
     this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis());
 
+    this.latestHeartBeatResponse.setResponseId(0);
+
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     this.readLock = lock.readLock();
     this.writeLock = lock.writeLock();
@@ -284,6 +284,18 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     }
   };
 
+  @Override
+  public HeartbeatResponse getLastHeartBeatResponse() {
+
+    this.writeLock.lock();
+
+    try {
+      return this.latestHeartBeatResponse;
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
   public void handle(RMNodeEvent event) {
     LOG.info("Processing " + event.getNodeId() + " of type " + event.getType());
     try {
@@ -343,7 +355,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
       rmNode.killAllContainers();
       rmNode.context.getDispatcher().getEventHandler().handle(
           new NodeRemovedSchedulerEvent(rmNode));
-      rmNode.nodeRemovalListener.RMNodeRemoved(rmNode.nodeId);
+
+      // Remove the node from the system.
+      rmNode.context.getRMNodes().remove(rmNode.nodeId);
       LOG.info("Removed Node " + rmNode.nodeId);
       
     }
@@ -355,6 +369,10 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
 
       RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
+
+      // Store the latest heartbeat response.
+      rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
+
       if (!statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
         rmNode.killAllContainers();
         rmNode.context.getDispatcher().getEventHandler().handle(
@@ -403,6 +421,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
       RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
 
+      // Store the latest heartbeat response.
+      rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
+
       if (statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
         rmNode.context.getDispatcher().getEventHandler().handle(
             new NodeAddedSchedulerEvent(rmNode));
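
With the RMNodeImpl change above, each node now caches the HeartbeatResponse most recently handed to it, so a repeated heartbeat can, for example, be answered with the previous response instead of being processed twice. The following is a minimal sketch of that caching pattern; SimpleHeartbeatResponse and NodeHeartbeatTracker are hypothetical stand-ins for illustration, not the YARN records or RMNodeImpl itself.

// Minimal sketch (not YARN code): the last-heartbeat-response caching pattern.
// SimpleHeartbeatResponse stands in for the real HeartbeatResponse record and
// only models the response id that RMNodeImpl initializes to 0 above.
class SimpleHeartbeatResponse {
  private int responseId;
  int getResponseId() { return responseId; }
  void setResponseId(int id) { this.responseId = id; }
}

class NodeHeartbeatTracker {
  private SimpleHeartbeatResponse latest = new SimpleHeartbeatResponse();

  NodeHeartbeatTracker() {
    latest.setResponseId(0);                 // mirrors the constructor change above
  }

  // Called when a STATUS_UPDATE-style event carries the response just sent out.
  synchronized void record(SimpleHeartbeatResponse response) {
    this.latest = response;
  }

  // A duplicate heartbeat (same response id as last time) can be answered with
  // the cached response instead of being processed again.
  synchronized SimpleHeartbeatResponse last() {
    return latest;
  }
}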

+ 9 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java

@@ -8,17 +8,21 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 
 public class RMNodeStatusEvent extends RMNodeEvent {
 
   private final NodeHealthStatus nodeHealthStatus;
   private Map<ApplicationId, List<Container>> containersCollection;
+  private final HeartbeatResponse latestResponse;
 
   public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus,
-      Map<ApplicationId, List<Container>> collection) {
+      Map<ApplicationId, List<Container>> collection,
+      HeartbeatResponse latestResponse) {
     super(nodeId, RMNodeEventType.STATUS_UPDATE);
     this.nodeHealthStatus = nodeHealthStatus;
     this.containersCollection = collection;
+    this.latestResponse = latestResponse;
   }
 
   public NodeHealthStatus getNodeHealthStatus() {
@@ -28,4 +32,8 @@ public class RMNodeStatusEvent extends RMNodeEvent {
   public Map<ApplicationId, List<Container>> getContainersCollection() {
     return this.containersCollection;
   }
+
+  public HeartbeatResponse getLatestResponse() {
+    return this.latestResponse;
+  }
 }

+ 1 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java

@@ -380,7 +380,7 @@ public class AppSchedulingInfo {
       }
       LOG.debug("allocate: applicationId=" + applicationId + " container="
           + container.getId() + " host="
-          + container.getContainerManagerAddress());
+          + container.getNodeId().toString());
     }
   }
 

+ 1 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java

@@ -119,7 +119,7 @@ public class SchedulerApp {
       Resources.addTo(currentConsumption, container.getResource());
       LOG.debug("allocate: applicationId=" + container.getId().getAppId()
           + " container=" + container.getId() + " host="
-          + container.getContainerManagerAddress());
+          + container.getNodeId().toString());
     }
   }
 

+ 1 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSApp.java

@@ -33,7 +33,7 @@ public class CSApp extends SchedulerApp {
   private final Comparator<CSNode> nodeComparator = new Comparator<CSNode>() {
     @Override
     public int compare(CSNode o1, CSNode o2) {
-      return o1.getNodeID().getId() - o2.getNodeID().getId();
+      return o1.getNodeID().compareTo(o2.getNodeID());
     }
   };
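
CSApp's node comparator now delegates to NodeId.compareTo instead of subtracting integer ids. With NodeId being a host:port pair, a natural total order is host first, then port; the sketch below shows that ordering with a hypothetical HostPort class and is not the actual NodeIdPBImpl implementation.

// Minimal sketch (not NodeIdPBImpl): ordering a host:port pair by host, then port.
final class HostPort implements Comparable<HostPort> {
  final String host;
  final int port;

  HostPort(String host, int port) {
    this.host = host;
    this.port = port;
  }

  @Override
  public int compareTo(HostPort other) {
    int byHost = this.host.compareTo(other.host);  // hosts compare lexicographically
    return byHost != 0 ? byHost : Integer.compare(this.port, other.port);
  }

  @Override
  public String toString() {
    return host + ":" + port;                      // the "host:port" form used above
  }
}

// e.g. new HostPort("h1", 1234).compareTo(new HostPort("h2", 5678)) < 0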
 

+ 2 - 2
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java

@@ -867,12 +867,12 @@ public class LeafQueue implements Queue {
         ContainerToken containerToken = this.recordFactory.newRecordInstance(ContainerToken.class);
         ContainerTokenIdentifier tokenidentifier =
           new ContainerTokenIdentifier(container.getId(),
-              container.getContainerManagerAddress(), container.getResource());
+              container.getNodeId().toString(), container.getResource());
         containerToken.setIdentifier(ByteBuffer.wrap(tokenidentifier.getBytes()));
         containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
         containerToken.setPassword(ByteBuffer.wrap(containerTokenSecretManager
               .createPassword(tokenidentifier)));
-          containerToken.setService(container.getContainerManagerAddress());
+          containerToken.setService(container.getNodeId().toString());
           container.setContainerToken(containerToken);
         }
 

+ 2 - 2
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java

@@ -482,14 +482,14 @@ public class FifoScheduler implements ResourceScheduler {
               recordFactory.newRecordInstance(ContainerToken.class);
           ContainerTokenIdentifier tokenidentifier =
             new ContainerTokenIdentifier(container.getId(),
-                container.getContainerManagerAddress(), container.getResource());
+                container.getNodeId().toString(), container.getResource());
           containerToken.setIdentifier(
               ByteBuffer.wrap(tokenidentifier.getBytes()));
           containerToken.setKind(ContainerTokenIdentifier.KIND.toString());
           containerToken.setPassword(
               ByteBuffer.wrap(containerTokenSecretManager
                   .createPassword(tokenidentifier)));
-          containerToken.setService(container.getContainerManagerAddress());
+          containerToken.setService(container.getNodeId().toString());
           container.setContainerToken(containerToken);
         }
         containers.add(container);
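
Both LeafQueue and FifoScheduler now set the container token's service field from the NodeId rather than from a separately tracked container-manager address, so the token service and the address a client connects to come from one source of truth. The sketch below illustrates that point with a hypothetical buildTokenService helper and example host/port values; it assumes NodeId.toString() yields the plain "host:port" form seen elsewhere in this commit.

// Minimal sketch (hypothetical helper, not the YARN API): deriving the token
// service and the connect address from the same host:port pair.
final class TokenServiceSketch {
  static String buildTokenService(String host, int port) {
    return host + ":" + port;   // assumed to match what NodeId.toString() produces
  }

  public static void main(String[] args) {
    String service = buildTokenService("nm-host.example.com", 45454);
    String connectAddress = "nm-host.example.com:45454";
    // Both strings are derived from the node id, so the service embedded in the
    // container token cannot drift from the address used for the NM connection.
    System.out.println(service.equals(connectAddress));   // prints "true"
  }
}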

+ 2 - 1
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java

@@ -66,12 +66,13 @@ class NodesPage extends RmView {
         NodeHealthStatus health = ni.getNodeHealthStatus();
         tbody.tr().
             td(ni.getRackName()).
-            td(String.valueOf(ni.getNodeID().getId())).
+            td(String.valueOf(ni.getNodeID().toString())).
             td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._().
             td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy").
             td(Times.format(health.getLastHealthReportTime())).
             td(String.valueOf(health.getHealthReport())).
             td(String.valueOf(ni.getNumContainers())).
+            // TODO: FIXME Vinodkv
 //            td(String.valueOf(ni.getUsedResource().getMemory())).
 //            td(String.valueOf(ni.getAvailableResource().getMemory())).
             _();

+ 16 - 13
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java

@@ -164,7 +164,6 @@ public class MockRM extends ResourceManager {
     for (ContainerId id : releases) {
       Container cont = recordFactory.newRecordInstance(Container.class);
       cont.setId(id);
-      cont.setContainerManagerAddress("");
      //TODO: set all fields
     }
     return allocateFromAM(attemptId, toRelease, 
@@ -193,18 +192,20 @@ public class MockRM extends ResourceManager {
   }
 
   //from Node
-  public void containerStatus(Container container, int nodeIntId) throws Exception {
+  public void containerStatus(Container container, NodeId nodeId) throws Exception {
     Map<ApplicationId, List<Container>> conts = new HashMap<ApplicationId, List<Container>>();
     conts.put(container.getId().getAppId(), Arrays.asList(new Container[]{}));
-    nodeHeartbeat(nodeIntId, conts, true);
+    nodeHeartbeat(nodeId, conts, true);
   }
 
-  public void registerNode(int nodeIntId, String host, int memory) throws Exception {
+  public void registerNode(String nodeIdStr, int memory) throws Exception {
+    String[] splits = nodeIdStr.split(":");
     NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
-    nodeId.setId(nodeIntId);
-    RegisterNodeManagerRequest req = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
-    req.setContainerManagerPort(1);
-    req.setHost(host);
+    nodeId.setHost(splits[0]);
+    nodeId.setPort(Integer.parseInt(splits[1]));
+    RegisterNodeManagerRequest req = recordFactory
+        .newRecordInstance(RegisterNodeManagerRequest.class);
+    req.setNodeId(nodeId);
     req.setHttpPort(2);
     Resource resource = recordFactory.newRecordInstance(Resource.class);
     resource.setMemory(memory);
@@ -212,14 +213,16 @@ public class MockRM extends ResourceManager {
     getResourceTrackerService().registerNodeManager(req);
   }
 
-  public void nodeHeartbeat(int i, boolean b) throws Exception {
-    nodeHeartbeat(i, new HashMap<ApplicationId, List<Container>>(), b);
+  public void nodeHeartbeat(String nodeIdStr, boolean b) throws Exception {
+    String[] splits = nodeIdStr.split(":");
+    NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
+    nodeId.setHost(splits[0]);
+    nodeId.setPort(Integer.parseInt(splits[1]));
+    nodeHeartbeat(nodeId, new HashMap<ApplicationId, List<Container>>(), b);
   }
 
-  public void nodeHeartbeat(int nodeIntId, Map<ApplicationId, 
+  public void nodeHeartbeat(NodeId nodeId, Map<ApplicationId, 
       List<Container>> conts, boolean isHealthy) throws Exception {
-    NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
-    nodeId.setId(nodeIntId);
     NodeHeartbeatRequest req = recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
     NodeStatus status = recordFactory.newRecordInstance(NodeStatus.class);
     status.setNodeId(nodeId);
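
The MockRM helpers now take "host:port" strings and split them to build a NodeId, matching the node ids used by TestRM below. A minimal sketch of that parsing convention follows, with a hypothetical ParsedNodeId helper; MockRM performs the equivalent split inline.

// Minimal sketch (hypothetical helper): the "host:port" parsing convention the
// test helpers rely on, assuming simple hostnames with a single ':' separator.
final class ParsedNodeId {
  final String host;
  final int port;

  private ParsedNodeId(String host, int port) {
    this.host = host;
    this.port = port;
  }

  static ParsedNodeId parse(String nodeIdStr) {
    String[] parts = nodeIdStr.split(":");         // e.g. "h1:1234" -> ["h1", "1234"]
    return new ParsedNodeId(parts[0], Integer.parseInt(parts[1]));
  }
}

// e.g. ParsedNodeId.parse("h1:1234") yields host "h1", port 1234, as used by TestRM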

+ 3 - 3
mapreduce/yarn/yarn-server/yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java

@@ -16,13 +16,13 @@ public class TestRM {
     rootLogger.setLevel(Level.DEBUG);
     MockRM rm = new MockRM();
     rm.start();
-    rm.registerNode(1, "h1", 5000);
-    rm.registerNode(2, "h2", 10000);
+    rm.registerNode("h1:1234", 5000);
+    rm.registerNode("h2:5678", 10000);
     
     RMApp app = rm.submitApp(2000);
 
     //kick the scheduling
-    rm.nodeHeartbeat(1, true);
+    rm.nodeHeartbeat("h1:1234", true);
 
     RMAppAttempt attempt = app.getCurrentAppAttempt();
     rm.sendAMLaunched(attempt.getAppAttemptId());