
Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1234087 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 13 years ago
Parent
Commit
2abe5c818d
21 files changed, 133 insertions and 38 deletions
  1. +3 -0   hadoop-common-project/hadoop-common/CHANGES.txt
  2. +12 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  3. +8 -0   hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  4. +8 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  5. +8 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
  6. +1 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  7. +3 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  8. +4 -5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
  9. +71 -10 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
  10. +2 -0  hadoop-mapreduce-project/CHANGES.txt
  11. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  12. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  13. +2 -3  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java
  14. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
  15. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
  16. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
  17. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
  18. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
  19. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
  20. +1 -1  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
  21. +2 -2  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -279,6 +279,9 @@ Release 0.23.1 - Unreleased
    HADOOP-7971. Adding back job/pipes/queue commands to bin/hadoop for
    backward compatibility. (Prashath Sharma via acmurthy) 
 
+   HADOOP-7982. UserGroupInformation fails to login if thread's context
+   classloader can't load HadoopLoginModule. (todd)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 12 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -416,9 +416,19 @@ public class UserGroupInformation {
   
   private static LoginContext
   newLoginContext(String appName, Subject subject) throws LoginException {
-    return new LoginContext(appName, subject, null, new HadoopConfiguration());
+    // Temporarily switch the thread's ContextClassLoader to match this
+    // class's classloader, so that we can properly load HadoopLoginModule
+    // from the JAAS libraries.
+    Thread t = Thread.currentThread();
+    ClassLoader oldCCL = t.getContextClassLoader();
+    t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
+    try {
+      return new LoginContext(appName, subject, null, new HadoopConfiguration());
+    } finally {
+      t.setContextClassLoader(oldCCL);
+    }
   }
-  
+
   private LoginContext getLogin() {
     return user.getLogin();
   }
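
For context on the fix: JAAS resolves login modules through the thread's context classloader, so a thread whose loader cannot see HadoopLoginModule fails to log in (HADOOP-7982). The save/set/restore idiom used above is general; a minimal standalone sketch of the same pattern (the helper name is illustrative, not part of the patch):

    // Run an action with the thread context classloader temporarily pinned
    // to a loader that can see our classes, restoring the old loader after.
    static <T> T withContextClassLoader(ClassLoader cl,
        java.util.concurrent.Callable<T> action) throws Exception {
      Thread t = Thread.currentThread();
      ClassLoader oldCCL = t.getContextClassLoader();
      t.setContextClassLoader(cl);
      try {
        return action.call();             // e.g. new LoginContext(...)
      } finally {
        t.setContextClassLoader(oldCCL);  // restore even if the action throws
      }
    }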

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -169,6 +169,9 @@ Trunk (unreleased changes)
     HDFS-2776. Missing interface annotation on JournalSet. 
     (Brandon Li via jitendra)
 
+    HDFS-2768. BackupNode stop can not close proxy connections because
+    it is not a proxy instance. (Uma Maheswara Rao G via eli)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -335,6 +338,11 @@ Release 0.23.1 - UNRELEASED
     HDFS-2790. FSNamesystem.setTimes throws exception with wrong
     configuration name in the message. (Arpit Gupta via eli)
 
+    HDFS-2810. Leases not getting renewed properly by clients (todd)
+
+    HDFS-2751. Datanode may incorrectly drop OS cache behind reads
+    even for short reads. (todd)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -404,11 +404,17 @@ public class DFSClient implements java.io.Closeable {
     return clientRunning;
   }
 
-  /** Renew leases */
-  void renewLease() throws IOException {
+  /**
+   * Renew leases.
+   * @return true if lease was renewed. May return false if this
+   * client has been closed or has no files open.
+   **/
+  boolean renewLease() throws IOException {
     if (clientRunning && !isFilesBeingWrittenEmpty()) {
       namenode.renewLease(clientName);
+      return true;
     }
+    return false;
   }
   
   /**

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java

@@ -67,7 +67,7 @@ import org.apache.hadoop.util.StringUtils;
  * </p>
  */
 class LeaseRenewer {
-  private static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
+  static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
 
   static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
   static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
@@ -407,7 +407,13 @@ class LeaseRenewer {
       final DFSClient c = copies.get(i);
       //skip if current client name is the same as the previous name.
       if (!c.getClientName().equals(previousName)) {
-        c.renewLease();
+        if (!c.renewLease()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Did not renew lease for client " +
+                c);
+          }
+          continue;
+        }
         previousName = c.getClientName();
         if (LOG.isDebugEnabled()) {
           LOG.debug("Lease renewed for client " + previousName);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -315,7 +315,7 @@ class BlockSender implements java.io.Closeable {
    * close opened files.
    */
   public void close() throws IOException {
-    if (blockInFd != null && shouldDropCacheBehindRead) {
+    if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
       // drop the last few MB of the file from cache
       try {
         NativeIO.posixFadviseIfPossible(
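
The added isLongRead() guard (HDFS-2751) stops short, random reads from evicting cached pages that other readers may still want; dropping the cache behind a read only pays off when a large portion of the block was streamed. A sketch of the guarded call, assuming the (fd, offset, len, flags) signature and POSIX_FADV_DONTNEED constant of NativeIO in this era — the arguments and the offset field names below are not shown in the hunk and are assumptions:

    if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
      try {
        // Hint the kernel that pages already served can be dropped.
        NativeIO.posixFadviseIfPossible(blockInFd, lastCacheDropOffset,
            offset - lastCacheDropOffset, NativeIO.POSIX_FADV_DONTNEED);
      } catch (Exception e) {
        // Cache advice is best-effort; a failure here is non-fatal.
      }
    }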

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
@@ -70,7 +71,7 @@ public class BackupNode extends NameNode {
   private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
 
   /** Name-node proxy */
-  NamenodeProtocol namenode;
+  NamenodeProtocolTranslatorPB namenode;
   /** Name-node RPC address */
   String nnRpcAddress;
   /** Name-node HTTP address */
@@ -191,7 +192,7 @@ public class BackupNode extends NameNode {
     }
     // Stop the RPC client
     if (namenode != null) {
-      RPC.stopProxy(namenode);
+      IOUtils.cleanup(LOG, namenode);
     }
     namenode = null;
     // Stop the checkpoint manager
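
Why the swap (HDFS-2768): RPC.stopProxy expects an object created by the RPC engine via java.lang.reflect.Proxy, but the protobuf translator is a plain wrapper class, so stopProxy cannot shut it down. IOUtils.cleanup(Log, Closeable...) closes any Closeable, skips nulls, and logs failures instead of throwing, which makes it safe in teardown paths:

    // Teardown idiom: works for translator wrappers, unlike RPC.stopProxy.
    // Null-safe and non-throwing, so it can run unconditionally on stop().
    IOUtils.cleanup(LOG, namenode);
    namenode = null;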

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java

@@ -24,10 +24,9 @@ import java.util.Arrays;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -41,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 class EditLogBackupOutputStream extends EditLogOutputStream {
   static int DEFAULT_BUFFER_SIZE = 256;
 
-  private JournalProtocol backupNode;        // RPC proxy to backup node
+  private JournalProtocolTranslatorPB backupNode;  // RPC proxy to backup node
   private NamenodeRegistration bnRegistration;  // backup node registration
   private NamenodeRegistration nnRegistration;  // active node registration
   private EditsDoubleBuffer doubleBuf;
@@ -94,14 +93,14 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
       throw new IOException("BackupEditStream has " + size +
                           " records still to be flushed and cannot be closed.");
     } 
-    RPC.stopProxy(backupNode); // stop the RPC threads
+    IOUtils.cleanup(Storage.LOG, backupNode); // stop the RPC threads
     doubleBuf.close();
     doubleBuf = null;
   }
 
   @Override
   public void abort() throws IOException {
-    RPC.stopProxy(backupNode);
+    IOUtils.cleanup(Storage.LOG, backupNode);
     doubleBuf = null;
   }
 

+ 71 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java

@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.*;
+
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
 
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -29,6 +32,8 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.base.Supplier;
+
 public class TestLeaseRenewer {
   private String FAKE_AUTHORITY="hdfs://nn1/";
   private UserGroupInformation FAKE_UGI_A =
@@ -46,19 +51,24 @@ public class TestLeaseRenewer {
   
   @Before
   public void setupMocksAndRenewer() throws IOException {
-    MOCK_DFSCLIENT = Mockito.mock(DFSClient.class);
-    Mockito.doReturn(true)
-      .when(MOCK_DFSCLIENT).isClientRunning();
-    Mockito.doReturn((int)FAST_GRACE_PERIOD)
-      .when(MOCK_DFSCLIENT).getHdfsTimeout();
-    Mockito.doReturn("myclient")
-      .when(MOCK_DFSCLIENT).getClientName();
+    MOCK_DFSCLIENT = createMockClient();
     
     renewer = LeaseRenewer.getInstance(
         FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
     renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
 }
  
+  private DFSClient createMockClient() {
+    DFSClient mock = Mockito.mock(DFSClient.class);
+    Mockito.doReturn(true)
+      .when(mock).isClientRunning();
+    Mockito.doReturn((int)FAST_GRACE_PERIOD)
+      .when(mock).getHdfsTimeout();
+    Mockito.doReturn("myclient")
+      .when(mock).getClientName();
+    return mock;
+  }
+
   @Test
   public void testInstanceSharing() throws IOException {
     // Two lease renewers with the same UGI should return
@@ -93,11 +103,11 @@ public class TestLeaseRenewer {
   public void testRenewal() throws Exception {
     // Keep track of how many times the lease gets renewed
     final AtomicInteger leaseRenewalCount = new AtomicInteger();
-    Mockito.doAnswer(new Answer<Void>() {
+    Mockito.doAnswer(new Answer<Boolean>() {
       @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
+      public Boolean answer(InvocationOnMock invocation) throws Throwable {
         leaseRenewalCount.incrementAndGet();
-        return null;
+        return true;
       }
     }).when(MOCK_DFSCLIENT).renewLease();
 
@@ -120,6 +130,57 @@ public class TestLeaseRenewer {
     renewer.closeFile(filePath, MOCK_DFSCLIENT);
   }
   
+  /**
+   * Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
+   * to several DFSClients with the same name, the first of which has no files
+   * open. Previously, this was causing the lease to not get renewed.
+   */
+  @Test
+  public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
+    // First DFSClient has no files open so doesn't renew leases.
+    final DFSClient mockClient1 = createMockClient();
+    Mockito.doReturn(false).when(mockClient1).renewLease();
+    assertSame(renewer, LeaseRenewer.getInstance(
+        FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
+    
+    // Set up a file so that we start renewing our lease.
+    DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
+    String filePath = "/foo";
+    renewer.put(filePath, mockStream1, mockClient1);
+
+    // Second DFSClient does renew lease
+    final DFSClient mockClient2 = createMockClient();
+    Mockito.doReturn(true).when(mockClient2).renewLease();
+    assertSame(renewer, LeaseRenewer.getInstance(
+        FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
+
+    // Set up a file so that we start renewing our lease.
+    DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
+    renewer.put(filePath, mockStream2, mockClient2);
+
+    
+    // Wait for lease to get renewed
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
+          Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
+          return true;
+        } catch (AssertionError err) {
+          LeaseRenewer.LOG.warn("Not yet satisfied", err);
+          return false;
+        } catch (IOException e) {
+          // should not throw!
+          throw new RuntimeException(e);
+        }
+      }
+    }, 100, 10000);
+
+    renewer.closeFile(filePath, mockClient1);
+    renewer.closeFile(filePath, mockClient2);
+  }
+  
   @Test
   public void testThreadName() throws Exception {
     DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
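
A note on the regression test above: GenericTestUtils.waitFor polls a Supplier<Boolean> (here every 100 ms, for up to 10 s) until it returns true. Converting Mockito.verify's AssertionError into a false return turns "not verified yet" into another poll instead of an immediate failure, so the test tolerates the renewer thread's scheduling. The pattern in isolation (the predicate is hypothetical):

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return conditionHolds();  // hypothetical predicate; must not throw
      }
    }, 100, 10000);  // check every 100 ms, time out after 10 s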

+ 2 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -187,6 +187,8 @@ Release 0.23.1 - Unreleased
     assign only one off-switch container in a single scheduling
     iteration. (Arun C Murthy via vinodkv)
 
+    MAPREDUCE-3692. yarn-resourcemanager out and log files can get big. (eli)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3567. Extraneous JobConf objects in AM heap. (Vinod Kumar

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -632,7 +632,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
    * The only entry point to change the Job.
    */
   public void handle(JobEvent event) {
-    LOG.info("Processing " + event.getJobId() + " of type " + event.getType());
+    LOG.debug("Processing " + event.getJobId() + " of type " + event.getType());
     try {
       writeLock.lock();
       JobState oldState = getState();
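
This info-to-debug downgrade (MAPREDUCE-3692) repeats across the state-machine event handlers in the files below; the per-event messages were bloating the ResourceManager's out and log files. Note the patch leaves the calls unguarded, so the string concatenation still runs even when debug is off; a common refinement, not part of this change, would be:

    if (LOG.isDebugEnabled()) {
      // Skip building the message entirely when debug logging is disabled.
      LOG.debug("Processing " + event.getJobId() + " of type " + event.getType());
    }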

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -537,7 +537,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
 
   @Override
   public void handle(TaskEvent event) {
-    LOG.info("Processing " + event.getTaskID() + " of type " + event.getType());
+    LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType());
     try {
       writeLock.lock();
       TaskState oldState = getState();

+ 2 - 3
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java

@@ -315,11 +315,10 @@ public class ProtoOverHadoopRpcEngine implements RpcEngine {
       ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest;
       ProtoSpecificRpcRequest rpcRequest = request.message;
       String methodName = rpcRequest.getMethodName();
-      System.out.println("Call: protocol=" + protocol + ", method="
-          + methodName);
-      if (verbose)
+      if (verbose) {
         log("Call: protocol=" + protocol + ", method="
             + methodName);
+      }
       MethodDescriptor methodDescriptor = service.getDescriptorForType()
           .findMethodByName(methodName);
       if (methodDescriptor == null) {

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java

@@ -373,7 +373,7 @@ public class ApplicationImpl implements Application {
 
     try {
       ApplicationId applicationID = event.getApplicationID();
-      LOG.info("Processing " + applicationID + " of type " + event.getType());
+      LOG.debug("Processing " + applicationID + " of type " + event.getType());
 
       ApplicationState oldState = stateMachine.getCurrentState();
       ApplicationState newState = null;

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java

@@ -811,7 +811,7 @@ public class ContainerImpl implements Container {
       this.writeLock.lock();
 
       ContainerId containerID = event.getContainerID();
-      LOG.info("Processing " + containerID + " of type " + event.getType());
+      LOG.debug("Processing " + containerID + " of type " + event.getType());
 
       ContainerState oldState = stateMachine.getCurrentState();
       ContainerState newState = null;

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java

@@ -181,7 +181,7 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
       this.writeLock.lock();
 
       Path resourcePath = event.getLocalResourceRequest().getPath();
-      LOG.info("Processing " + resourcePath + " of type " + event.getType());
+      LOG.debug("Processing " + resourcePath + " of type " + event.getType());
 
       ResourceState oldState = this.stateMachine.getCurrentState();
       ResourceState newState = null;

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java

@@ -413,7 +413,7 @@ public class RMAppImpl implements RMApp {
 
     try {
       ApplicationId appID = event.getApplicationId();
-      LOG.info("Processing event for " + appID + " of type "
+      LOG.debug("Processing event for " + appID + " of type "
           + event.getType());
       final RMAppState oldState = getState();
       try {

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java

@@ -468,7 +468,7 @@ public class RMAppAttemptImpl implements RMAppAttempt {
 
     try {
       ApplicationAttemptId appAttemptID = event.getApplicationAttemptId();
-      LOG.info("Processing event for " + appAttemptID + " of type "
+      LOG.debug("Processing event for " + appAttemptID + " of type "
           + event.getType());
       final RMAppAttemptState oldState = getAppAttemptState();
       try {

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java

@@ -192,7 +192,7 @@ public class RMContainerImpl implements RMContainer {
   
   @Override
   public void handle(RMContainerEvent event) {
-    LOG.info("Processing " + event.getContainerId() + " of type " + event.getType());
+    LOG.debug("Processing " + event.getContainerId() + " of type " + event.getType());
     try {
       writeLock.lock();
       RMContainerState oldState = getState();

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java

@@ -283,7 +283,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   }
 
   public void handle(RMNodeEvent event) {
-    LOG.info("Processing " + event.getNodeId() + " of type " + event.getType());
+    LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
     try {
       writeLock.lock();
       RMNodeState oldState = getState();

+ 2 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java

@@ -575,12 +575,12 @@ public class FifoScheduler implements ResourceScheduler {
 
     if (Resources.greaterThanOrEqual(node.getAvailableResource(),
         minimumAllocation)) {
-      LOG.info("Node heartbeat " + rmNode.getNodeID() + 
+      LOG.debug("Node heartbeat " + rmNode.getNodeID() + 
           " available resource = " + node.getAvailableResource());
       
       assignContainers(node);
 
-      LOG.info("Node after allocation " + rmNode.getNodeID() + " resource = "
+      LOG.debug("Node after allocation " + rmNode.getNodeID() + " resource = "
           + node.getAvailableResource());
     }