Bläddra i källkod

YARN-9363. Replaced debug logging with SLF4J parameterized log message.
Contributed by Prabhu Joseph

Eric Yang 6 år sedan
förälder
incheckning
5f6e225166
42 ändrade filer med 188 tillägg och 277 borttagningar
  1. 1 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  2. 2 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
  3. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
  4. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
  5. 3 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java
  6. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
  7. 9 23
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
  8. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java
  9. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
  10. 2 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
  11. 8 10
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java
  12. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java
  13. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
  14. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java
  15. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
  16. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
  17. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
  18. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
  19. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java
  20. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
  21. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
  22. 9 15
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java
  23. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java
  24. 16 31
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java
  25. 7 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java
  26. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java
  27. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java
  28. 8 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java
  29. 4 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java
  30. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
  31. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
  32. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
  33. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
  34. 4 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
  35. 4 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
  36. 4 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
  37. 9 19
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
  38. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
  39. 4 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
  40. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
  41. 11 18
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
  42. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java

+ 1 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -242,9 +242,7 @@ public class DelegationTokenRenewer
       } catch (InterruptedException ie) {
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Exception in removeRenewAction: ", ie);
-        }
+        LOG.debug("Exception in removeRenewAction: ", ie);
       }
     }
   }

+ 2 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java

@@ -212,11 +212,8 @@ public class ProviderUtils implements YarnServiceConstants {
       log.info("Component instance conf dir already exists: " + compInstanceDir);
     }
 
-    if (log.isDebugEnabled()) {
-      log.debug("Tokens substitution for component instance: " + instance
-          .getCompInstanceName() + System.lineSeparator()
-          + tokensForSubstitution);
-    }
+    log.debug("Tokens substitution for component instance: {}{}{}", instance
+        .getCompInstanceName(), System.lineSeparator(), tokensForSubstitution);
 
     for (ConfigFile originalFile : compLaunchContext.getConfiguration()
         .getFiles()) {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java

@@ -287,9 +287,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     Path domainLogPath =
         new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
             DOMAIN_LOG_PREFIX + appAttemptId.toString());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
-    }
+    LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
     this.logFDsCache.writeDomainLog(
         fs, domainLogPath, objMapper, domain, isAppendSupported);
   }

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java

@@ -27,8 +27,8 @@ import io.netty.channel.epoll.EpollDomainSocketChannel;
 import io.netty.channel.epoll.EpollEventLoopGroup;
 import io.netty.channel.unix.DomainSocketAddress;
 import io.netty.util.concurrent.DefaultThreadFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.SocketAddress;
@@ -39,7 +39,8 @@ import java.util.concurrent.TimeUnit;
  */
 public final class CsiGrpcClient implements AutoCloseable {
 
-  private static final Log LOG = LogFactory.getLog(CsiGrpcClient.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CsiGrpcClient.class);
 
   private final ManagedChannel channel;
 

+ 3 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java

@@ -25,7 +25,8 @@ import io.netty.channel.epoll.EpollServerDomainSocketChannel;
 import org.apache.hadoop.yarn.csi.utils.GrpcHelper;
 
 import java.io.IOException;
-import java.util.logging.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A fake implementation of CSI driver.
@@ -33,7 +34,7 @@ import java.util.logging.Logger;
  */
 public class FakeCsiDriver {
 
-  private static final Logger LOG = Logger
+  private static final Logger LOG = LoggerFactory
       .getLogger(FakeCsiDriver.class.getName());
 
   private Server server;

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.util.timeline;
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
@@ -33,7 +33,8 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSec
  * Set of utility methods to be used across timeline reader and collector.
  */
 public final class TimelineServerUtils {
-  private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineServerUtils.class);
 
   private TimelineServerUtils() {
   }

+ 9 - 23
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java

@@ -36,8 +36,8 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DelegateToFileSystem;
 import org.apache.hadoop.fs.FileContext;
@@ -68,8 +68,8 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
  */
 public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   
-  private static final Log LOG = LogFactory
-      .getLog(WindowsSecureContainerExecutor.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(WindowsSecureContainerExecutor.class);
   
   public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s";
   
@@ -591,10 +591,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   
   @Override
   protected void copyFile(Path src, Path dst, String owner) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(), 
-          dst.toString(), owner));
-    }
+    LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner);
     Native.Elevated.copy(src,  dst, true);
     Native.Elevated.chown(dst, owner, nodeManagerGroup);
   }
@@ -607,10 +604,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
     // This is similar to how LCE creates dirs
     //
     perms = new FsPermission(DIR_PERM);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("createDir: %s perm:%s owner:%s", 
-          dirPath.toString(), perms.toString(), owner));
-    }
+    LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner);
     
     super.createDir(dirPath, perms, createParent, owner);
     lfs.setOwner(dirPath, owner, nodeManagerGroup);
@@ -619,10 +613,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   @Override
   protected void setScriptExecutable(Path script, String owner) 
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("setScriptExecutable: %s owner:%s", 
-          script.toString(), owner));
-    }
+    LOG.debug("setScriptExecutable: {} owner:{}", script, owner);
     super.setScriptExecutable(script, owner);
     Native.Elevated.chown(script, owner, nodeManagerGroup);
   }
@@ -630,10 +621,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
   @Override
   public Path localizeClasspathJar(Path jarPath, Path target, String owner) 
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("localizeClasspathJar: %s %s o:%s", 
-          jarPath, target, owner));
-    }
+    LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner);
     createDir(target,  new FsPermission(DIR_PERM), true, owner);
     String fileName = jarPath.getName();
     Path dst = new Path(target, fileName);
@@ -669,9 +657,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
     copyFile(nmPrivateContainerTokensPath, tokenDst, user);
 
     File cwdApp = new File(appStorageDir.toString());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("cwdApp: %s", cwdApp));
-    }
+    LOG.debug("cwdApp: {}", cwdApp);
 
     List<String> command ;
 

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -42,7 +42,7 @@ import java.io.InterruptedIOException;
  */
 public class RecoverPausedContainerLaunch extends ContainerLaunch {
 
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       RecoveredContainerLaunch.class);
 
   public RecoverPausedContainerLaunch(Context context,

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -56,8 +56,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * a container to kill. The algorithm that picks the container is a plugin.
  */
 public class CGroupElasticMemoryController extends Thread {
-  protected static final Log LOG = LogFactory
-      .getLog(CGroupElasticMemoryController.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CGroupElasticMemoryController.class);
   private final Clock clock = new MonotonicClock();
   private String yarnCGroupPath;
   private String oomListenerPath;

+ 2 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java

@@ -561,11 +561,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
     String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param);
     PrintWriter pw = null;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          String.format("updateCGroupParam for path: %s with value %s",
-              cGroupParamPath, value));
-    }
+    LOG.debug("updateCGroupParam for path: {} with value {}",
+        cGroupParamPath, value);
 
     try {
       File file = new File(cGroupParamPath);

+ 8 - 10
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.CpuTimeTracker;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.SysInfoLinux;
@@ -63,8 +63,8 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
     Continue,
     Exit
   }
-  protected static final Log LOG = LogFactory
-      .getLog(CGroupsResourceCalculator.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CGroupsResourceCalculator.class);
   private static final String PROCFS = "/proc";
   static final String CGROUP = "cgroup";
   static final String CPU_STAT = "cpuacct.stat";
@@ -145,9 +145,7 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
 
   @Override
   public float getCpuUsagePercent() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
-    }
+    LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
     return cpuTimeTracker.getCpuTrackerUsagePercent();
   }
 
@@ -187,9 +185,9 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
     processPhysicalMemory = getMemorySize(memStat);
     if (memswStat.exists()) {
       processVirtualMemory = getMemorySize(memswStat);
-    } else if(LOG.isDebugEnabled()) {
-      LOG.debug("Swap cgroups monitoring is not compiled into the kernel " +
-          memswStat.getAbsolutePath().toString());
+    } else {
+      LOG.debug("Swap cgroups monitoring is not compiled into the kernel {}",
+          memswStat.getAbsolutePath());
     }
   }
 

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
@@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
  * it is backward compatible with procfs in terms of virtual memory usage.
  */
 public class CombinedResourceCalculator  extends ResourceCalculatorProcessTree {
-  protected static final Log LOG = LogFactory
-      .getLog(CombinedResourceCalculator.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(CombinedResourceCalculator.class);
   private ProcfsBasedProcessTree procfs;
   private CGroupsResourceCalculator cgroup;
 

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -46,8 +46,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultOOMHandler implements Runnable {
-  protected static final Log LOG = LogFactory
-      .getLog(DefaultOOMHandler.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(DefaultOOMHandler.class);
   private final Context context;
   private final String memoryStatFile;
   private final CGroupsHandler cgroups;

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java

@@ -20,8 +20,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
  *
  */
 public final class NetworkTagMappingManagerFactory {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       NetworkTagMappingManagerFactory.class);
 
   private NetworkTagMappingManagerFactory() {}

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java

@@ -22,8 +22,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -44,7 +44,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
  * */
 public class FpgaResourceAllocator {
 
-  static final Log LOG = LogFactory.getLog(FpgaResourceAllocator.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourceAllocator.class);
 
   private List<FpgaDevice> allowedFpgas = new LinkedList<>();
 

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java

@@ -20,8 +20,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
 @InterfaceAudience.Private
 public class FpgaResourceHandlerImpl implements ResourceHandler {
 
-  static final Log LOG = LogFactory.getLog(FpgaResourceHandlerImpl.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourceHandlerImpl.class);
 
   private final String REQUEST_FPGA_IP_ID_KEY = "REQUESTED_FPGA_IP_ID";
 

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
  * Allocate GPU resources according to requirements
  */
 public class GpuResourceAllocator {
-  final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(GpuResourceAllocator.class);
   private static final int WAIT_MS_PER_LOOP = 1000;
 
   private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -41,8 +41,8 @@ import java.util.Arrays;
 import java.util.List;
 
 public class GpuResourceHandlerImpl implements ResourceHandler {
-  final static Log LOG = LogFactory
-      .getLog(GpuResourceHandlerImpl.class);
+  final static Logger LOG = LoggerFactory
+      .getLogger(GpuResourceHandlerImpl.class);
 
   // This will be used by container-executor to add necessary clis
   public static final String EXCLUDED_GPUS_CLI_OPTION = "--excluded_gpus";

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 
@@ -36,7 +36,8 @@ public class NumaNodeResource {
   private long usedMemory;
   private int usedCpus;
 
-  private static final Log LOG = LogFactory.getLog(NumaNodeResource.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(NumaNodeResource.class);
 
   private Map<ContainerId, Long> containerVsMemUsage =
       new ConcurrentHashMap<>();

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java

@@ -29,8 +29,8 @@ import java.util.Map.Entry;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
@@ -51,7 +51,8 @@ import com.google.common.annotations.VisibleForTesting;
  */
 public class NumaResourceAllocator {
 
-  private static final Log LOG = LogFactory.getLog(NumaResourceAllocator.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(NumaResourceAllocator.class);
 
   // Regex to find node ids, Ex: 'available: 2 nodes (0-1)'
   private static final String NUMA_NODEIDS_REGEX =

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -39,8 +39,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resource
  */
 public class NumaResourceHandlerImpl implements ResourceHandler {
 
-  private static final Log LOG = LogFactory
-      .getLog(NumaResourceHandlerImpl.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(NumaResourceHandlerImpl.class);
   private final NumaResourceAllocator numaResourceAllocator;
   private final String numaCtlCmd;
 

+ 9 - 15
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java

@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import java.util.concurrent.ConcurrentHashMap;
  * scheduler.
  * */
 public class DeviceMappingManager {
-  static final Log LOG = LogFactory.getLog(DeviceMappingManager.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(DeviceMappingManager.class);
 
   private Context nmContext;
   private static final int WAIT_MS_PER_LOOP = 1000;
@@ -163,10 +164,7 @@ public class DeviceMappingManager {
     ContainerId containerId = container.getContainerId();
     int requestedDeviceCount = getRequestedDeviceCount(resourceName,
         requestedResource);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Try allocating " + requestedDeviceCount
-          + " " + resourceName);
-    }
+    LOG.debug("Try allocating {} {}", requestedDeviceCount, resourceName);
     // Assign devices to container if requested some.
     if (requestedDeviceCount > 0) {
       if (requestedDeviceCount > getAvailableDevices(resourceName)) {
@@ -266,10 +264,8 @@ public class DeviceMappingManager {
     while (iter.hasNext()) {
       entry = iter.next();
       if (entry.getValue().equals(containerId)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Recycle devices: " + entry.getKey()
-              + ", type: " + resourceName + " from " + containerId);
-        }
+        LOG.debug("Recycle devices: {}, type: {} from {}", entry.getKey(),
+            resourceName, containerId);
         iter.remove();
       }
     }
@@ -317,10 +313,8 @@ public class DeviceMappingManager {
     ContainerId containerId = c.getContainerId();
     Map<String, String> env = c.getLaunchContext().getEnvironment();
     if (null == dps) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Customized device plugin scheduler is preferred "
-            + "but not implemented, use default logic");
-      }
+      LOG.debug("Customized device plugin scheduler is preferred "
+          + "but not implemented, use default logic");
       defaultScheduleAction(allowed, used,
           assigned, containerId, count);
     } else {

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -47,7 +47,8 @@ import java.util.Map;
  *
  * */
 public class DevicePluginAdapter implements ResourcePlugin {
-  private final static Log LOG = LogFactory.getLog(DevicePluginAdapter.class);
+  private final static Logger LOG = LoggerFactory.
+      getLogger(DevicePluginAdapter.class);
 
   private final String resourceName;
 

+ 16 - 31
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
@@ -47,7 +47,7 @@ import java.util.Set;
 public class DeviceResourceDockerRuntimePluginImpl
     implements DockerCommandPlugin {
 
-  final static Log LOG = LogFactory.getLog(
+  final static Logger LOG = LoggerFactory.getLogger(
       DeviceResourceDockerRuntimePluginImpl.class);
 
   private String resourceName;
@@ -73,9 +73,7 @@ public class DeviceResourceDockerRuntimePluginImpl
   public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
       Container container) throws ContainerExecutionException {
     String containerId = container.getContainerId().toString();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Try to update docker run command for: " + containerId);
-    }
+    LOG.debug("Try to update docker run command for: {}", containerId);
     if(!requestedDevice(resourceName, container)) {
       return;
     }
@@ -89,17 +87,12 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle runtime
     dockerRunCommand.addRuntime(deviceRuntimeSpec.getContainerRuntime());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle docker container runtime type: "
-          + deviceRuntimeSpec.getContainerRuntime() + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle docker container runtime type: {} for container: {}",
+        deviceRuntimeSpec.getContainerRuntime(), containerId);
     // handle device mounts
     Set<MountDeviceSpec> deviceMounts = deviceRuntimeSpec.getDeviceMounts();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle device mounts: " + deviceMounts + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle device mounts: {} for container: {}", deviceMounts,
+        containerId);
     for (MountDeviceSpec mountDeviceSpec : deviceMounts) {
       dockerRunCommand.addDevice(
           mountDeviceSpec.getDevicePathInHost(),
@@ -107,10 +100,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle volume mounts
     Set<MountVolumeSpec> mountVolumeSpecs = deviceRuntimeSpec.getVolumeMounts();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle volume mounts: " + mountVolumeSpecs + " for container: "
-          + containerId);
-    }
+    LOG.debug("Handle volume mounts: {} for container: {}", mountVolumeSpecs,
+        containerId);
     for (MountVolumeSpec mountVolumeSpec : mountVolumeSpecs) {
       if (mountVolumeSpec.getReadOnly()) {
         dockerRunCommand.addReadOnlyMountLocation(
@@ -124,10 +115,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     }
     // handle envs
     dockerRunCommand.addEnv(deviceRuntimeSpec.getEnvs());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handle envs: " + deviceRuntimeSpec.getEnvs()
-          + " for container: " + containerId);
-    }
+    LOG.debug("Handle envs: {} for container: {}",
+        deviceRuntimeSpec.getEnvs(), containerId);
   }
 
   @Override
@@ -147,10 +136,8 @@ public class DeviceResourceDockerRuntimePluginImpl
             DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND);
         command.setDriverName(volumeSec.getVolumeDriver());
         command.setVolumeName(volumeSec.getVolumeName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Get volume create request from plugin:" + volumeClaims
-              + " for container: " + container.getContainerId().toString());
-        }
+        LOG.debug("Get volume create request from plugin:{} for container: {}",
+            volumeClaims, container.getContainerId());
         return command;
       }
     }
@@ -195,10 +182,8 @@ public class DeviceResourceDockerRuntimePluginImpl
     allocated = devicePluginAdapter
         .getDeviceMappingManager()
         .getAllocatedDevices(resourceName, containerId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Get allocation from deviceMappingManager: "
-          + allocated + ", " + resourceName + " for container: " + containerId);
-    }
+    LOG.debug("Get allocation from deviceMappingManager: {}, {} for"
+        + " container: {}", allocated, resourceName, containerId);
     cachedAllocation.put(containerId, allocated);
     return allocated;
   }

+ 7 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -53,7 +53,8 @@ import java.util.Set;
  * */
 public class DeviceResourceHandlerImpl implements ResourceHandler {
 
-  static final Log LOG = LogFactory.getLog(DeviceResourceHandlerImpl.class);
+  static final Logger LOG = LoggerFactory.
+      getLogger(DeviceResourceHandlerImpl.class);
 
   private final String resourceName;
   private final DevicePlugin devicePlugin;
@@ -134,10 +135,7 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
     String containerIdStr = container.getContainerId().toString();
     DeviceMappingManager.DeviceAllocation allocation =
         deviceMappingManager.assignDevices(resourceName, container);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Allocated to "
-          + containerIdStr + ": " + allocation);
-    }
+    LOG.debug("Allocated to {}: {}", containerIdStr, allocation);
     DeviceRuntimeSpec spec;
     try {
       spec = devicePlugin.onDevicesAllocated(
@@ -291,13 +289,9 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
     }
     DeviceType deviceType;
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Try to get device type from device path: " + devName);
-      }
+      LOG.debug("Try to get device type from device path: {}", devName);
       String output = shellWrapper.getDeviceFileType(devName);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("stat output:" + output);
-      }
+      LOG.debug("stat output:{}", output);
       deviceType = output.startsWith("c") ? DeviceType.CHAR : DeviceType.BLOCK;
     } catch (IOException e) {
       String msg =

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
@@ -33,7 +33,8 @@ import java.util.Set;
  * */
 public class DeviceResourceUpdaterImpl extends NodeResourceUpdaterPlugin {
 
-  final static Log LOG = LogFactory.getLog(DeviceResourceUpdaterImpl.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(DeviceResourceUpdaterImpl.class);
 
   private String resourceName;
   private DevicePlugin devicePlugin;

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java

@@ -19,8 +19,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,7 +37,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMResourceInfo;
 
 public class FpgaResourcePlugin implements ResourcePlugin {
-  private static final Log LOG = LogFactory.getLog(FpgaResourcePlugin.class);
+  private static final Logger LOG = LoggerFactory.
+      getLogger(FpgaResourcePlugin.class);
 
   private ResourceHandler fpgaResourceHandler = null;
 

+ 8 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugi
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * Implementation to use nvidia-docker v1 as GPU docker command plugin.
  */
 public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
-  final static Log LOG = LogFactory.getLog(NvidiaDockerV1CommandPlugin.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(NvidiaDockerV1CommandPlugin.class);
 
   private Configuration conf;
   private Map<String, Set<String>> additionalCommands = null;
@@ -121,9 +122,7 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
           addToCommand(DEVICE_OPTION, getValue(str));
         } else if (str.startsWith(VOLUME_DRIVER_OPTION)) {
           volumeDriver = getValue(str);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Found volume-driver:" + volumeDriver);
-          }
+          LOG.debug("Found volume-driver:{}", volumeDriver);
         } else if (str.startsWith(MOUNT_RO_OPTION)) {
           String mount = getValue(str);
           if (!mount.endsWith(":ro")) {
@@ -286,15 +285,11 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
         if (VOLUME_NAME_PATTERN.matcher(mountSource).matches()) {
           // This is a valid named volume
           newVolumeName = mountSource;
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Found volume name for GPU:" + newVolumeName);
-          }
+          LOG.debug("Found volume name for GPU:{}", newVolumeName);
           break;
         } else{
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to match " + mountSource
-                + " to named-volume regex pattern");
-          }
+          LOG.debug("Failed to match {} to named-volume regex pattern",
+              mountSource);
         }
       }
     }

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@@ -41,7 +41,8 @@ import java.util.Set;
  * Implementation to use nvidia-docker v2 as GPU docker command plugin.
  */
 public class NvidiaDockerV2CommandPlugin implements DockerCommandPlugin {
-  final static Log LOG = LogFactory.getLog(NvidiaDockerV2CommandPlugin.class);
+  final static Logger LOG = LoggerFactory.
+      getLogger(NvidiaDockerV2CommandPlugin.class);
 
   private String nvidiaRuntime = "nvidia";
   private String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,8 +33,8 @@ import java.lang.management.ManagementFactory;
  * Class for testing {@link NodeManagerMXBean} implementation.
  */
 public class TestNodeManagerMXBean {
-  public static final Log LOG = LogFactory.getLog(
-          TestNodeManagerMXBean.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestNodeManagerMXBean.class);
 
   @Test
   public void testNodeManagerMXBean() throws Exception {

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -43,8 +43,8 @@ import static org.mockito.Mockito.when;
  * Test for elastic non-strict memory controller based on cgroups.
  */
 public class TestCGroupElasticMemoryController {
-  protected static final Log LOG = LogFactory
-      .getLog(TestCGroupElasticMemoryController.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(TestCGroupElasticMemoryController.class);
   private YarnConfiguration conf = new YarnConfiguration();
   private File script = new File("target/" +
       TestCGroupElasticMemoryController.class.getName());

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java

@@ -92,10 +92,8 @@ public class AbstractAutoCreatedLeafQueue extends LeafQueue {
       // note: we currently set maxCapacity to capacity
       // this might be revised later
       setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("successfully changed to {} for queue {}", capacity, this
+      LOG.debug("successfully changed to {} for queue {}", capacity, this
             .getQueueName());
-      }
 
       //update queue used capacity etc
       CSQueueUtils.updateQueueStatistics(resourceCalculator,

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java

@@ -1053,10 +1053,8 @@ public class CapacityScheduler extends
           + " to scheduler from user " + application.getUser() + " in queue "
           + queue.getQueueName());
       if (isAttemptRecovering) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(applicationAttemptId
-              + " is recovering. Skipping notifying ATTEMPT_ADDED");
-        }
+        LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+            applicationAttemptId);
       } else{
         rmContext.getDispatcher().getEventHandler().handle(
             new RMAppAttemptEvent(applicationAttemptId,

+ 4 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java

@@ -593,10 +593,8 @@ public class ParentQueue extends AbstractCSQueue {
         NodeType.NODE_LOCAL);
 
     while (canAssign(clusterResource, node)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign containers to child-queue of "
-            + getQueueName());
-      }
+      LOG.debug("Trying to assign containers to child-queue of {}",
+          getQueueName());
 
       // Are we over maximum-capacity for this queue?
       // This will also consider parent's limits and also continuous reservation
@@ -781,10 +779,8 @@ public class ParentQueue extends AbstractCSQueue {
     for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(
         candidates.getPartition()); iter.hasNext(); ) {
       CSQueue childQueue = iter.next();
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
-          + " stats: " + childQueue);
-      }
+      LOG.debug("Trying to assign to queue: {} stats: {}",
+          childQueue.getQueuePath(), childQueue);
 
       // Get ResourceLimits of child queue before assign containers
       ResourceLimits childLimits =

+ 4 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java

@@ -221,15 +221,10 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
                 + parentQueue.getQueueName(), e);
       }
     } else{
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Skipping queue management updates for parent queue "
-                + parentQueue
-                .getQueuePath() + " "
-                + "since configuration for auto creating queues beyond "
-                + "parent's "
-                + "guaranteed capacity is disabled");
-      }
+      LOG.debug("Skipping queue management updates for parent queue {} "
+          + "since configuration for auto creating queues beyond "
+          + "parent's guaranteed capacity is disabled",
+          parentQueue.getQueuePath());
     }
     return queueManagementChanges;
   }

+ 4 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java

@@ -669,19 +669,15 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
           if (updatedQueueTemplate.getQueueCapacities().
               getCapacity(nodeLabel) > 0) {
             if (isActive(leafQueue, nodeLabel)) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Queue is already active." + " Skipping activation : "
-                    + leafQueue.getQueueName());
-              }
+              LOG.debug("Queue is already active. Skipping activation : {}",
+                  leafQueue.getQueueName());
             } else{
               activate(leafQueue, nodeLabel);
             }
           } else{
             if (!isActive(leafQueue, nodeLabel)) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Queue is already de-activated. Skipping "
-                    + "de-activation : " + leafQueue.getQueueName());
-              }
+              LOG.debug("Queue is already de-activated. Skipping "
+                  + "de-activation : {}", leafQueue.getQueueName());
             } else{
               deactivate(leafQueue, nodeLabel);
             }

+ 9 - 19
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java

@@ -154,18 +154,13 @@ public final class PlacementConstraintsUtil {
     if (schedulerNode.getNodeAttributes() == null ||
         !schedulerNode.getNodeAttributes().contains(requestAttribute)) {
       if (opCode == NodeAttributeOpCode.NE) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Incoming requestAttribute:" + requestAttribute
-              + "is not present in " + schedulerNode.getNodeID()
-              + ", however opcode is NE. Hence accept this node.");
-        }
+        LOG.debug("Incoming requestAttribute:{} is not present in {},"
+            + " however opcode is NE. Hence accept this node.",
+            requestAttribute, schedulerNode.getNodeID());
         return true;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Incoming requestAttribute:" + requestAttribute
-            + "is not present in " + schedulerNode.getNodeID()
-            + ", skip such node.");
-      }
+      LOG.debug("Incoming requestAttribute:{} is not present in {},"
+          + " skip such node.", requestAttribute, schedulerNode.getNodeID());
       return false;
     }
 
@@ -183,21 +178,16 @@ public final class PlacementConstraintsUtil {
       }
       if (requestAttribute.equals(nodeAttribute)) {
         if (isOpCodeMatches(requestAttribute, nodeAttribute, opCode)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Incoming requestAttribute:" + requestAttribute
-                    + " matches with node:" + schedulerNode.getNodeID());
-          }
+          LOG.debug("Incoming requestAttribute:{} matches with node:{}",
+              requestAttribute, schedulerNode.getNodeID());
           found = true;
           return found;
         }
       }
     }
     if (!found) {
-      if (LOG.isDebugEnabled()) {
-        LOG.info("skip this node:" + schedulerNode.getNodeID()
-            + " for requestAttribute:" + requestAttribute);
-      }
+      LOG.debug("skip this node:{} for requestAttribute:{}",
+          schedulerNode.getNodeID(), requestAttribute);
       return false;
     }
     return true;

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java

@@ -426,10 +426,8 @@ public abstract class FSQueue implements Queue, Schedulable {
    */
   boolean assignContainerPreCheck(FSSchedulerNode node) {
     if (node.getReservedContainer() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Assigning container failed on node '" + node.getNodeName()
-            + " because it has reserved containers.");
-      }
+      LOG.debug("Assigning container failed on node '{}' because it has"
+          + " reserved containers.", node.getNodeName());
       return false;
     } else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
       if (LOG.isDebugEnabled()) {

+ 4 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java

@@ -397,9 +397,8 @@ public class FifoScheduler extends
     LOG.info("Accepted application " + applicationId + " from user: " + user
         + ", currently num of applications: " + applications.size());
     if (isAppRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
-      }
+      LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+          applicationId);
     } else {
       rmContext.getDispatcher().getEventHandler()
         .handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
@@ -429,10 +428,8 @@ public class FifoScheduler extends
     LOG.info("Added Application Attempt " + appAttemptId
         + " to scheduler from user " + application.getUser());
     if (isAttemptRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(appAttemptId
-            + " is recovering. Skipping notifying ATTEMPT_ADDED");
-      }
+      LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+          appAttemptId);
     } else {
       rmContext.getDispatcher().getEventHandler().handle(
         new RMAppAttemptEvent(appAttemptId,

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java

@@ -396,10 +396,8 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
       SchedulingMode schedulingMode) {
     // We will only look at node label = nodeLabelToLookAt according to
     // schedulingMode and partition of node.
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("precheckNode is invoked for " + schedulerNode.getNodeID() + ","
-          + schedulingMode);
-    }
+    LOG.debug("precheckNode is invoked for {},{}", schedulerNode.getNodeID(),
+        schedulingMode);
     String nodePartitionToLookAt;
     if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
       nodePartitionToLookAt = schedulerNode.getPartition();

+ 11 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java

@@ -223,10 +223,8 @@ public class Application {
     if (requests == null) {
       requests = new HashMap<String, ResourceRequest>();
       this.requests.put(schedulerKey, requests);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + schedulerKey.getPriority()
-            + " application="+ applicationId);
-      }
+      LOG.debug("Added priority={} application={}", schedulerKey.getPriority(),
+          applicationId);
     }
     
     final Resource capability = requestSpec.get(schedulerKey);
@@ -242,10 +240,7 @@ public class Application {
     LOG.info("Added task " + task.getTaskId() + " to application " + 
         applicationId + " at priority " + schedulerKey.getPriority());
     
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("addTask: application=" + applicationId
-        + " #asks=" + ask.size());
-    }
+    LOG.debug("addTask: application={} #asks={}", applicationId, ask.size());
     
     // Create resource requests
     for (String host : task.getHosts()) {
@@ -320,12 +315,12 @@ public class Application {
   
   public synchronized List<Container> getResources() throws IOException {
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources begin:" + " application=" + applicationId
-        + " #ask=" + ask.size());
+      LOG.debug("getResources begin: application={} #ask={}",
+          applicationId, ask.size());
 
       for (ResourceRequest request : ask) {
-        LOG.debug("getResources:" + " application=" + applicationId
-          + " ask-request=" + request);
+        LOG.debug("getResources: application={} ask-request={}",
+            applicationId, request);
       }
     }
     
@@ -346,8 +341,8 @@ public class Application {
     ask.clear();
     
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources() for " + applicationId + ":"
-        + " ask=" + ask.size() + " received=" + containers.size());
+      LOG.debug("getResources() for {}: ask={} received={}",
+          applicationId, ask.size(), containers.size());
     }
     
     return containers;
@@ -451,10 +446,8 @@ public class Application {
     
     updateResourceRequest(requests.get(ResourceRequest.ANY));
     
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("updateResourceDemands:" + " application=" + applicationId
-        + " #asks=" + ask.size());
-    }
+    LOG.debug("updateResourceDemands: application={} #asks={}",
+        applicationId, ask.size());
   }
   
   private void updateResourceRequest(ResourceRequest request) {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java

@@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
           YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
           YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" batch size=" + batchSize);
-    }
+    LOG.debug(" batch size={}", batchSize);
   }