Browse Source

YARN-9349. Changed logging to use slf4j api.
Contributed by Prabhu Joseph

Eric Yang 6 years ago
parent
commit
2064ca015d
100 changed files with 439 additions and 913 deletions
  1. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
  2. 11 18
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
  3. 4 10
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
  4. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
  5. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
  6. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
  7. 2 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
  8. 1 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
  9. 1 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
  10. 8 18
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
  11. 15 34
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
  12. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
  13. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
  14. 9 25
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
  15. 4 11
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
  16. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
  17. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
  18. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
  19. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
  20. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
  21. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
  22. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
  23. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
  24. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
  25. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
  26. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
  27. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
  28. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
  29. 3 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
  30. 10 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
  31. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
  32. 5 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
  33. 7 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
  34. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
  35. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
  36. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
  37. 6 14
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
  38. 2 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
  39. 8 18
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
  40. 14 32
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
  41. 3 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
  42. 5 15
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
  43. 7 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
  44. 2 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
  45. 6 12
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
  46. 3 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
  47. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
  48. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java
  49. 2 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java
  50. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
  51. 2 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
  52. 3 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
  53. 7 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
  54. 21 43
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
  55. 4 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
  56. 1 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
  57. 3 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
  58. 7 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
  59. 3 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
  60. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
  61. 9 20
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
  62. 1 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
  63. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
  64. 1 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java
  65. 4 14
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
  66. 12 22
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
  67. 10 23
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
  68. 3 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
  69. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
  70. 5 11
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
  71. 3 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
  72. 5 15
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
  73. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
  74. 13 27
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
  75. 7 12
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
  76. 4 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
  77. 14 23
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
  78. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
  79. 3 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
  80. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
  81. 10 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
  82. 5 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java
  83. 5 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
  84. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
  85. 3 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
  86. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
  87. 20 57
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
  88. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
  89. 5 11
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
  90. 18 38
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
  91. 4 10
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
  92. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
  93. 3 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
  94. 1 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
  95. 3 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
  96. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java
  97. 2 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
  98. 5 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
  99. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
  100. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java

@@ -167,9 +167,8 @@ public final class ProviderUtils {
         }
         if (clazz != null) {
           if (fileSystemClass.isAssignableFrom(clazz)) {
-            LOG.debug("Filesystem based provider" +
-                " excluded from provider path due to recursive dependency: "
-                + provider);
+            LOG.debug("Filesystem based provider excluded from provider " +
+                "path due to recursive dependency: {}", provider);
           } else {
             if (newProviderPath.length() > 0) {
               newProviderPath.append(",");

+ 11 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java

@@ -138,20 +138,17 @@ public class ResourceUtils {
       Map<String, ResourceInformation> res) {
     ResourceInformation ri;
     if (!res.containsKey(MEMORY)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding resource type - name = " + MEMORY + ", units = "
-            + ResourceInformation.MEMORY_MB.getUnits() + ", type = "
-            + ResourceTypes.COUNTABLE);
-      }
+      LOG.debug("Adding resource type - name = {}, units = {}, type = {}",
+          MEMORY, ResourceInformation.MEMORY_MB.getUnits(),
+          ResourceTypes.COUNTABLE);
       ri = ResourceInformation.newInstance(MEMORY,
           ResourceInformation.MEMORY_MB.getUnits());
       res.put(MEMORY, ri);
     }
     if (!res.containsKey(VCORES)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding resource type - name = " + VCORES
-            + ", units = , type = " + ResourceTypes.COUNTABLE);
-      }
+      LOG.debug("Adding resource type - name = {}, units = {}, type = {}",
+          VCORES, ResourceInformation.VCORES.getUnits(),
+          ResourceTypes.COUNTABLE);
       ri = ResourceInformation.newInstance(VCORES);
       res.put(VCORES, ri);
     }
@@ -189,9 +186,9 @@ public class ResourceUtils {
       String resourceTypesKey, String schedulerKey, long schedulerDefault) {
     long value = conf.getLong(resourceTypesKey, -1L);
     if (value == -1) {
-      LOG.debug("Mandatory Resource '" + resourceTypesKey + "' is not "
+      LOG.debug("Mandatory Resource '{}' is not "
           + "configured in resource-types config file. Setting allocation "
-          + "specified using '" + schedulerKey + "'");
+          + "specified using '{}'", resourceTypesKey, schedulerKey);
       value = conf.getLong(schedulerKey, schedulerDefault);
     }
     return value;
@@ -450,9 +447,7 @@ public class ResourceUtils {
       Configuration conf) {
     try {
       InputStream ris = getConfInputStream(resourceFile, conf);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Found " + resourceFile + ", adding to configuration");
-      }
+      LOG.debug("Found {}, adding to configuration", resourceFile);
       conf.addResource(ris);
     } catch (FileNotFoundException fe) {
       LOG.info("Unable to find '" + resourceFile + "'.");
@@ -575,10 +570,8 @@ public class ResourceUtils {
       }
       nodeResources.get(resourceType).setValue(resourceValue);
       nodeResources.get(resourceType).setUnits(units);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting value for resource type " + resourceType + " to "
-            + resourceValue + " with units " + units);
-      }
+      LOG.debug("Setting value for resource type {} to {} with units {}",
+          resourceType, resourceValue, units);
     }
   }
 

+ 4 - 10
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java

@@ -1269,19 +1269,15 @@ public class ApplicationMaster {
 
     @Override
     public void onContainerStopped(ContainerId containerId) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Succeeded to stop Container " + containerId);
-      }
+      LOG.debug("Succeeded to stop Container {}", containerId);
       containers.remove(containerId);
     }
 
     @Override
     public void onContainerStatusReceived(ContainerId containerId,
         ContainerStatus containerStatus) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: id=" + containerId + ", status=" +
-            containerStatus);
-      }
+      LOG.debug("Container Status: id={}, status={}", containerId,
+          containerStatus);
 
       // If promote_opportunistic_after_start is set, automatically promote
       // opportunistic containers to guaranteed.
@@ -1305,9 +1301,7 @@ public class ApplicationMaster {
     @Override
     public void onContainerStarted(ContainerId containerId,
         Map<String, ByteBuffer> allServiceResponse) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Succeeded to start Container " + containerId);
-      }
+      LOG.debug("Succeeded to start Container {}", containerId);
       Container container = containers.get(containerId);
       if (container != null) {
         applicationMaster.nmClientAsync.getContainerStatusAsync(

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java

@@ -361,9 +361,7 @@ public class SystemServiceManagerImpl extends AbstractService
   private Service getServiceDefinition(Path filePath) {
     Service service = null;
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading service definition from FS: " + filePath);
-      }
+      LOG.debug("Loading service definition from FS: {}", filePath);
       service = jsonSerDeser.load(fs, filePath);
     } catch (IOException e) {
       LOG.info("Error while loading service definition from FS: {}", e);

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java

@@ -1189,7 +1189,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
           .append(entry.getValue().getResource().getFile())
           .append(System.lineSeparator());
     }
-    LOG.debug(builder.toString());
+    LOG.debug("{}", builder);
   }
 
   private String buildCommandLine(Service app, Configuration conf,
@@ -1249,7 +1249,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     }
     if (!UserGroupInformation.isSecurityEnabled()) {
       String userName = UserGroupInformation.getCurrentUser().getUserName();
-      LOG.debug("Run as user " + userName);
+      LOG.debug("Run as user {}", userName);
       // HADOOP_USER_NAME env is used by UserGroupInformation when log in
       // This env makes AM run as this user
       env.put("HADOOP_USER_NAME", userName);
@@ -1405,7 +1405,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       if (LOG.isDebugEnabled()) {
         if (tokens != null && tokens.length != 0) {
           for (Token<?> token : tokens) {
-            LOG.debug("Got DT: " + token);
+            LOG.debug("Got DT: {}", token);
           }
         }
       }

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java

@@ -196,7 +196,7 @@ public class AbstractLauncher {
 
         String key = entry.getKey();
         LocalResource val = entry.getValue();
-        log.debug(key + "=" + ServiceUtils.stringify(val.getResource()));
+        log.debug("{} = {}", key, ServiceUtils.stringify(val.getResource()));
       }
     }
   }

+ 2 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java

@@ -77,16 +77,12 @@ public class ServiceMetricsSink implements MetricsSink {
     }
 
     if (isServiceMetrics && appId != null) {
-      if (log.isDebugEnabled()) {
-        log.debug("Publishing service metrics. " + record);
-      }
+      log.debug("Publishing service metrics. {}", record);
       serviceTimelinePublisher.publishMetrics(record.metrics(), appId,
           ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(),
           record.timestamp());
     } else if (isComponentMetrics) {
-      if (log.isDebugEnabled()) {
-        log.debug("Publishing Component metrics. " + record);
-      }
+      log.debug("Publishing Component metrics. {}", record);
       serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(),
           ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp());
     }

+ 1 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java

@@ -857,10 +857,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
 
     int loggingCounter = logInterval;
     do {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Check the condition for main loop.");
-      }
-
+      LOG.debug("Check the condition for main loop.");
       boolean result = check.get();
       if (result) {
         LOG.info("Exits the main loop.");

+ 1 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java

@@ -465,10 +465,7 @@ extends AbstractService {
 
     int loggingCounter = logInterval;
     do {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Check the condition for main loop.");
-      }
-
+      LOG.debug("Check the condition for main loop.");
       boolean result = check.get();
       if (result) {
         LOG.info("Exits the main loop.");

+ 8 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java

@@ -80,10 +80,8 @@ public class ContainerManagementProtocolProxy {
               + " (" + maxConnectedNMs + ") can not be less than 0.");
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " : " +
-          maxConnectedNMs);
-    }
+    LOG.debug("{} : {}", YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES,
+        maxConnectedNMs);
 
     if (maxConnectedNMs > 0) {
       cmProxy =
@@ -110,10 +108,8 @@ public class ContainerManagementProtocolProxy {
     while (proxy != null
         && !proxy.token.getIdentifier().equals(
             nmTokenCache.getToken(containerManagerBindAddr).getIdentifier())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Refreshing proxy as NMToken got updated for node : "
-            + containerManagerBindAddr);
-      }
+      LOG.debug("Refreshing proxy as NMToken got updated for node : {}",
+          containerManagerBindAddr);
       // Token is updated. check if anyone has already tried closing it.
       if (!proxy.scheduledForClose) {
         // try closing the proxy. Here if someone is already using it
@@ -149,10 +145,8 @@ public class ContainerManagementProtocolProxy {
   private void addProxyToCache(String containerManagerBindAddr,
       ContainerManagementProtocolProxyData proxy) {
     while (cmProxy.size() >= maxConnectedNMs) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Cleaning up the proxy cache, size=" + cmProxy.size()
-            + " max=" + maxConnectedNMs);
-      }
+      LOG.debug("Cleaning up the proxy cache, size={} max={}", cmProxy.size(),
+          maxConnectedNMs);
       boolean removedProxy = false;
       for (ContainerManagementProtocolProxyData otherProxy : cmProxy.values()) {
         removedProxy = removeProxy(otherProxy);
@@ -193,9 +187,7 @@ public class ContainerManagementProtocolProxy {
       ContainerManagementProtocolProxyData proxy) {
     proxy.activeCallers--;
     if (proxy.scheduledForClose && proxy.activeCallers < 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Closing proxy : " + proxy.containerManagerBindAddr);
-      }
+      LOG.debug("Closing proxy : {}", proxy.containerManagerBindAddr);
       cmProxy.remove(proxy.containerManagerBindAddr);
       try {
         rpc.stopProxy(proxy.getContainerManagementProtocol(), conf);
@@ -265,9 +257,7 @@ public class ContainerManagementProtocolProxy {
       
       final InetSocketAddress cmAddr =
           NetUtils.createSocketAddr(containerManagerBindAddr);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Opening proxy : " + containerManagerBindAddr);
-      }
+      LOG.debug("Opening proxy : {}", containerManagerBindAddr);
       // the user in createRemoteUser in this context has to be ContainerID
       UserGroupInformation user =
           UserGroupInformation.createRemoteUser(containerId

+ 15 - 34
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java

@@ -137,27 +137,21 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     if (locationMap == null) {
       locationMap = new HashMap<>();
       this.remoteRequestsTable.put(priority, locationMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + priority);
-      }
+      LOG.debug("Added priority={}", priority);
     }
     Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
         execTypeMap = locationMap.get(resourceName);
     if (execTypeMap == null) {
       execTypeMap = new HashMap<>();
       locationMap.put(resourceName, execTypeMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added resourceName=" + resourceName);
-      }
+      LOG.debug("Added resourceName={}", resourceName);
     }
     TreeMap<Resource, ResourceRequestInfo> capabilityMap =
         execTypeMap.get(execType);
     if (capabilityMap == null) {
       capabilityMap = new TreeMap<>(new AMRMClientImpl.ResourceReverseComparator());
       execTypeMap.put(execType, capabilityMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added Execution Type=" + execType);
-      }
+      LOG.debug("Added Execution Type={}", execType);
     }
     capabilityMap.put(capability, resReqInfo);
   }
@@ -168,25 +162,19 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     Map<String, Map<ExecutionType, TreeMap<Resource,
         ResourceRequestInfo>>> locationMap = remoteRequestsTable.get(priority);
     if (locationMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such priority=" + priority);
-      }
+      LOG.debug("No such priority={}", priority);
       return null;
     }
     Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
         execTypeMap = locationMap.get(resourceName);
     if (execTypeMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such resourceName=" + resourceName);
-      }
+      LOG.debug("No such resourceName={}", resourceName);
       return null;
     }
     TreeMap<Resource, ResourceRequestInfo> capabilityMap =
         execTypeMap.get(execType);
     if (capabilityMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such Execution Type=" + execType);
-      }
+      LOG.debug("No such Execution Type={}", execType);
       return null;
     }
     retVal = capabilityMap.remove(capability);
@@ -286,9 +274,8 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     if (ResourceRequest.ANY.equals(resourceName)) {
       resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding request to ask " + resourceRequestInfo.remoteRequest);
-    }
+    LOG.debug("Adding request to ask {}", resourceRequestInfo.remoteRequest);
+
     return resourceRequestInfo;
   }
 
@@ -298,22 +285,16 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
         execTypeReq.getExecutionType(), capability);
 
     if (resourceRequestInfo == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as ResourceRequestInfo with" +
-            "priority=" + priority + ", " +
-            "resourceName=" + resourceName + ", " +
-            "executionType=" + execTypeReq + ", " +
-            "capability=" + capability + " is not present in request table");
-      }
+      LOG.debug("Not decrementing resource as ResourceRequestInfo with"
+          + " priority={} resourceName={} executionType={} capability={} is"
+          + " not present in request table", priority, resourceName,
+          execTypeReq, capability);
       return null;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
-          + " priority=" + priority.getPriority()
-          + " resourceName=" + resourceName + " numContainers="
-          + resourceRequestInfo.remoteRequest.getNumContainers());
-    }
+    LOG.debug("BEFORE decResourceRequest: applicationId= priority={}"
+        +" resourceName={} numContainers={}", priority.getPriority(),
+        resourceName, resourceRequestInfo.remoteRequest.getNumContainers());
 
     resourceRequestInfo.remoteRequest.setNumContainers(
         resourceRequestInfo.remoteRequest.getNumContainers() - 1);

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java

@@ -84,9 +84,7 @@ public class SharedCacheClientImpl extends SharedCacheClient {
   @Override
   protected void serviceStart() throws Exception {
     this.scmClient = createClientProxy();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to Shared Cache Manager at " + this.scmAddress);
-    }
+    LOG.debug("Connecting to Shared Cache Manager at {}", this.scmAddress);
     super.serviceStart();
   }
 

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java

@@ -397,10 +397,8 @@ public class YarnClientImpl extends YarnClient {
       return;
     }
     credentials.addToken(timelineService, timelineDelegationToken);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Add timeline delegation token into credentials: "
-          + timelineDelegationToken);
-    }
+    LOG.debug("Add timeline delegation token into credentials: {}",
+        timelineDelegationToken);
     DataOutputBuffer dob = new DataOutputBuffer();
     credentials.writeTokenStorageToStream(dob);
     tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

+ 9 - 25
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java

@@ -224,10 +224,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     if (!entitiesToSummaryCache.isEmpty()) {
       Path summaryLogPath =
           new Path(attemptDir, SUMMARY_LOG_PREFIX + appAttemptId.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing summary log for " + appAttemptId.toString() + " to "
-            + summaryLogPath);
-      }
+      LOG.debug("Writing summary log for {} to {}", appAttemptId,
+          summaryLogPath);
       this.logFDsCache.writeSummaryEntityLogs(fs, summaryLogPath, objMapper,
           appAttemptId, entitiesToSummaryCache, isAppendSupported);
     }
@@ -235,10 +233,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     if (!entitiesToEntityCache.isEmpty()) {
       Path entityLogPath =
           new Path(attemptDir, ENTITY_LOG_PREFIX + groupId.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing entity log for " + groupId.toString() + " to "
-            + entityLogPath);
-      }
+      LOG.debug("Writing entity log for {} to {}", groupId, entityLogPath);
       this.logFDsCache.writeEntityLogs(fs, entityLogPath, objMapper,
           appAttemptId, groupId, entitiesToEntityCache, isAppendSupported);
     }
@@ -293,8 +288,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
         new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
             DOMAIN_LOG_PREFIX + appAttemptId.toString());
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Writing domains for " + appAttemptId.toString() + " to "
-          + domainLogPath);
+      LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
     }
     this.logFDsCache.writeDomainLog(
         fs, domainLogPath, objMapper, domain, isAppendSupported);
@@ -324,9 +318,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       if (writerClosed()) {
         prepareForWrite();
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing entity list of size " + entities.size());
-      }
+      LOG.debug("Writing entity list of size {}", entities.size());
       for (TimelineEntity entity : entities) {
         getObjectMapper().writeValue(getJsonGenerator(), entity);
       }
@@ -558,9 +550,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
         try {
           flush();
         } catch (Exception e) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(e.toString());
-          }
+          LOG.debug("{}", e);
         }
       }
     }
@@ -997,9 +987,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path attemptDir = new Path(appDir, appAttemptId.toString());
       if (FileSystem.mkdirs(fs, attemptDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New attempt directory created - " + attemptDir);
-        }
+        LOG.debug("New attempt directory created - {}", attemptDir);
       }
       return attemptDir;
     }
@@ -1009,9 +997,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path appDir = new Path(appRootDir, appId.toString());
       if (FileSystem.mkdirs(fs, appDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New app directory created - " + appDir);
-        }
+        LOG.debug("New app directory created - {}", appDir);
       }
       return appDir;
     }
@@ -1023,9 +1009,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path userDir = new Path(activePath, user);
       if (FileSystem.mkdirs(fs, userDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New user directory created - " + userDir);
-        }
+        LOG.debug("New user directory created - {}", userDir);
       }
       return userDir;
     }

+ 4 - 11
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java

@@ -133,11 +133,8 @@ public abstract class TimelineWriter implements Flushable {
       LOG.error(msg);
       if (resp != null) {
         msg += " HTTP error code: " + resp.getStatus();
-        if (LOG.isDebugEnabled()) {
-          String output = resp.getEntity(String.class);
-          LOG.debug("HTTP error code: " + resp.getStatus()
-              + " Server response : \n" + output);
-        }
+        LOG.debug("HTTP error code: {} Server response : \n{}",
+            resp.getStatus(), resp.getEntity(String.class));
       }
       throw new YarnException(msg);
     }
@@ -149,18 +146,14 @@ public abstract class TimelineWriter implements Flushable {
   public ClientResponse doPostingObject(Object object, String path) {
     WebResource webResource = client.resource(resURI);
     if (path == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("POST to " + resURI);
-      }
+      LOG.debug("POST to {}", resURI);
       ClientResponse r = webResource.accept(MediaType.APPLICATION_JSON)
           .type(MediaType.APPLICATION_JSON)
           .post(ClientResponse.class, object);
       r.bufferEntity();
       return r;
     } else if (path.equals("domain")) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("PUT to " + resURI +"/" + path);
-      }
+      LOG.debug("PUT to {}/{}", resURI, path);
       ClientResponse r = webResource.path(path).accept(MediaType.APPLICATION_JSON)
           .type(MediaType.APPLICATION_JSON)
           .put(ClientResponse.class, object);

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java

@@ -189,10 +189,8 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   @SuppressWarnings("unchecked")
   protected void dispatch(Event event) {
     //all events go thru this loop
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Dispatching the event " + event.getClass().getName() + "."
-          + event.toString());
-    }
+    LOG.debug("Dispatching the event {}.{}", event.getClass().getName(),
+        event);
 
     Class<? extends Enum> type = event.getType().getDeclaringClass();
 

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java

@@ -45,7 +45,7 @@ public class HadoopYarnProtoRPC extends YarnRPC {
   @Override
   public Object getProxy(Class protocol, InetSocketAddress addr,
       Configuration conf) {
-    LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
+    LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol {}", protocol);
     return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
         addr, conf);
   }
@@ -60,8 +60,8 @@ public class HadoopYarnProtoRPC extends YarnRPC {
       InetSocketAddress addr, Configuration conf,
       SecretManager<? extends TokenIdentifier> secretManager,
       int numHandlers, String portRangeConfig) {
-    LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol + 
-        " with " + numHandlers + " handlers");
+    LOG.debug("Creating a HadoopYarnProtoRpc server for protocol {} with {}"
+        + " handlers", protocol, numHandlers);
     
     return RpcFactoryProvider.getServerFactory(conf).getServer(protocol, 
         instance, addr, conf, secretManager, numHandlers, portRangeConfig);

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java

@@ -57,7 +57,7 @@ public abstract class YarnRPC {
   }
   
   public static YarnRPC create(Configuration conf) {
-    LOG.debug("Creating YarnRPC for " + 
+    LOG.debug("Creating YarnRPC for {}",
         conf.get(YarnConfiguration.IPC_RPC_IMPL));
     String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
     if (clazzName == null) {

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java

@@ -850,10 +850,10 @@ public class LogAggregationIndexedFileController
       }
       if (uuidReadLen != UUID_LENGTH || !Arrays.equals(this.uuid, uuidRead)) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("the length of loaded UUID:" + uuidReadLen);
-          LOG.debug("the loaded UUID:" + new String(uuidRead,
+          LOG.debug("the length of loaded UUID:{}", uuidReadLen);
+          LOG.debug("the loaded UUID:{}", new String(uuidRead,
               Charset.forName("UTF-8")));
-          LOG.debug("the expected UUID:" + new String(this.uuid,
+          LOG.debug("the expected UUID:{}", new String(this.uuid,
               Charset.forName("UTF-8")));
         }
         throw new IOException("The UUID from "

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java

@@ -61,9 +61,7 @@ public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
         fs.delete(oldMirrorPath, false);
       } catch (IOException e) {
         // do nothing
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Exception while removing old mirror", e);
-        }
+        LOG.debug("Exception while removing old mirror", e);
       }
       
       // rename new to old

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java

@@ -43,10 +43,10 @@ public class AMRMTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())
           && checkService(service, token)) {
         return (Token<AMRMTokenIdentifier>) token;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java

@@ -326,7 +326,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
 
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing ContainerTokenIdentifier to RPC layer: " + this);
+    LOG.debug("Writing ContainerTokenIdentifier to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
 

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java

@@ -45,10 +45,8 @@ public class ContainerTokenSelector implements
       return null;
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking for service: " + service + ". Current token is "
-            + token);
-      }
+      LOG.debug("Looking for service: {}. Current token is {}", service,
+          token);
       if (ContainerTokenIdentifier.KIND.equals(token.getKind()) && 
           service.equals(token.getService())) {
         return (Token<ContainerTokenIdentifier>) token;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java

@@ -98,7 +98,7 @@ public class NMTokenIdentifier extends TokenIdentifier {
   
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing NMTokenIdentifier to RPC layer: " + this);
+    LOG.debug("Writing NMTokenIdentifier to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
 

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java

@@ -41,10 +41,8 @@ public class NMTokenSelector implements
       return null;
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking for service: " + service + ". Current token is "
-            + token);
-      }
+      LOG.debug("Looking for service: {}. Current token is {}", service,
+          token);
       if (NMTokenIdentifier.KIND.equals(token.getKind()) && 
           service.equals(token.getService())) {
         return (Token<NMTokenIdentifier>) token;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java

@@ -70,7 +70,7 @@ public abstract class YarnAuthorizationProvider {
   public static void destroy() {
     synchronized (YarnAuthorizationProvider.class) {
       if (authorizer != null) {
-        LOG.debug(authorizer.getClass().getName() + " is destroyed.");
+        LOG.debug("{} is destroyed.", authorizer.getClass().getName());
         authorizer = null;
       }
     }

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java

@@ -39,10 +39,10 @@ public class ClientToAMTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (ClientToAMTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {
         return (Token<ClientToAMTokenIdentifier>) token;

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java

@@ -51,10 +51,10 @@ public class RMDelegationTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (RMDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
           && checkService(service, token)) {
         return (Token<RMDelegationTokenIdentifier>) token;

+ 3 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java

@@ -43,14 +43,10 @@ public class TimelineDelegationTokenSelector
     if (service == null) {
       return null;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Looking for a token with service " + service.toString());
-    }
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Token kind is " + token.getKind().toString()
-            + " and the token's service name is " + token.getService());
-      }
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (TimelineDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {
         return (Token<TimelineDelegationTokenIdentifier>) token;

+ 10 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java

@@ -98,11 +98,8 @@ public class ApplicationACLsManager {
       ApplicationAccessType applicationAccessType, String applicationOwner,
       ApplicationId applicationId) {
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Verifying access-type " + applicationAccessType + " for "
-          + callerUGI + " on application " + applicationId + " owned by "
-          + applicationOwner);
-    }
+    LOG.debug("Verifying access-type {} for {} on application {} owned by {}",
+            applicationAccessType, callerUGI, applicationId, applicationOwner);
 
     String user = callerUGI.getShortUserName();
     if (!areACLsEnabled()) {
@@ -112,21 +109,18 @@ public class ApplicationACLsManager {
     Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS
         .get(applicationId);
     if (acls == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for application "
-            + applicationId + " owned by "
-            + applicationOwner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
-      }
+      LOG.debug("ACL not found for application {} owned by {}."
+          + " Using default [{}]", applicationId, applicationOwner,
+          YarnConfiguration.DEFAULT_YARN_APP_ACL);
     } else {
       AccessControlList applicationACLInMap = acls.get(applicationAccessType);
       if (applicationACLInMap != null) {
         applicationACL = applicationACLInMap;
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for access-type " + applicationAccessType
-            + " for application " + applicationId + " owned by "
-            + applicationOwner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
+      } else {
+        LOG.debug("ACL not found for access-type {} for application {}"
+            + " owned by {}. Using default [{}]", applicationAccessType,
+            applicationId, applicationOwner,
+            YarnConfiguration.DEFAULT_YARN_APP_ACL);
       }
     }
 

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java

@@ -141,7 +141,7 @@ public final class DockerClientConfigHandler {
     tokens.rewind();
     if (LOG.isDebugEnabled()) {
       for (Token token : credentials.getAllTokens()) {
-        LOG.debug("Token read from token storage: " + token.toString());
+        LOG.debug("Token read from token storage: {}", token);
       }
     }
     return credentials;
@@ -172,9 +172,7 @@ public final class DockerClientConfigHandler {
           registryUrlNode.put(ti.getRegistryUrl(), registryCredNode);
           registryCredNode.put(CONFIG_AUTH_KEY,
               new String(tk.getPassword(), Charset.forName("UTF-8")));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Prepared token for write: " + tk.toString());
-          }
+          LOG.debug("Prepared token for write: {}", tk);
         }
       }
       if (foundDockerCred) {

+ 5 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java

@@ -394,12 +394,8 @@ public class FSDownload implements Callable<Path> {
       throw new IOException("Invalid resource", e);
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("Starting to download %s %s %s",
-          sCopy,
-          resource.getType(),
-          resource.getPattern()));
-    }
+    LOG.debug("Starting to download {} {} {}", sCopy,
+        resource.getType(), resource.getPattern());
 
     final Path destinationTmp = new Path(destDirPath + "_tmp");
     createDir(destinationTmp, cachePerms);
@@ -420,10 +416,8 @@ public class FSDownload implements Callable<Path> {
       changePermissions(dFinal.getFileSystem(conf), dFinal);
       files.rename(destinationTmp, destDirPath, Rename.OVERWRITE);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("File has been downloaded to %s from %s",
-            new Path(destDirPath, sCopy.getName()), sCopy));
-      }
+      LOG.debug("File has been downloaded to {} from {}",
+          new Path(destDirPath, sCopy.getName()), sCopy);
     } catch (Exception e) {
       try {
         files.delete(destDirPath, true);
@@ -470,9 +464,7 @@ public class FSDownload implements Callable<Path> {
       perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Changing permissions for path " + path + " to perm " + perm);
-    }
+    LOG.debug("Changing permissions for path {} to perm {}", path, perm);
 
     final FsPermission fPerm = perm;
     if (null == userUgi) {

+ 7 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java

@@ -264,7 +264,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         }
       }
 
-      LOG.debug(this.toString());
+      LOG.debug("{}", this);
 
       if (smapsEnabled) {
         // Update smaps info
@@ -403,13 +403,10 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
               // memory reclaimable by killing the process
               total += info.anonymous;
 
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-                    + ", info : " + info.toString()
-                    + ", total : " + (total * KB_TO_BYTES));
-              }
+              LOG.debug(" total({}): PID : {}, info : {}, total : {}",
+                  olderThanAge, p.getPid(), info, (total * KB_TO_BYTES));
             }
-            LOG.debug(procMemInfo.toString());
+            LOG.debug("{}", procMemInfo);
           }
         }
       }
@@ -468,9 +465,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
   @Override
   public float getCpuUsagePercent() {
     BigInteger processTotalJiffies = getTotalProcessJiffies();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
-    }
+    LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
     cpuTimeTracker.updateElapsedJiffies(processTotalJiffies,
         clock.getTime());
     return cpuTimeTracker.getCpuTrackerUsagePercent();
@@ -793,9 +788,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
           if (memInfo.find()) {
             String key = memInfo.group(1).trim();
             String value = memInfo.group(2).replace(KB, "").trim();
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MemInfo : " + key + " : Value  : " + value);
-            }
+            LOG.debug("MemInfo : {} : Value  : {}", key, value);
 
             if (memoryMappingInfo != null) {
               memoryMappingInfo.setMemInfo(key, value);
@@ -941,9 +934,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       if (info == null) {
         return;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("setMemInfo : memInfo : " + info);
-      }
+      LOG.debug("setMemInfo : memInfo : {}", info);
       switch (info) {
       case SIZE:
         size = val;

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java

@@ -133,11 +133,11 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
             pInfo.cpuTimeMs = Long.parseLong(procInfo[3]);
             allProcs.put(pInfo.pid, pInfo);
           } catch (NumberFormatException nfe) {
-            LOG.debug("Error parsing procInfo." + nfe);
+            LOG.debug("Error parsing procInfo.", nfe);
           }
         } else {
-          LOG.debug("Expected split length of proc info to be "
-              + procInfoSplitCount + ". Got " + procInfo.length);
+          LOG.debug("Expected split length of proc info to be {}. Got {}",
+              procInfoSplitCount, procInfo.length);
         }
       }
     }

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java

@@ -102,7 +102,7 @@ public class YarnVersionInfo extends VersionInfo {
   }
   
   public static void main(String[] args) {
-    LOG.debug("version: "+ getVersion());
+    LOG.debug("version: {}", getVersion());
     System.out.println("YARN " + getVersion());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java

@@ -271,7 +271,7 @@ public class TestProcfsBasedProcessTree {
       fReader = new FileReader(pidFileName);
       pidFile = new BufferedReader(fReader);
     } catch (FileNotFoundException f) {
-      LOG.debug("PidFile doesn't exist : " + pidFileName);
+      LOG.debug("PidFile doesn't exist : {}", pidFileName);
       return pid;
     }
 

+ 6 - 14
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java

@@ -99,16 +99,12 @@ public class DefaultCsiAdaptorImpl implements CsiAdaptorPlugin {
   @Override
   public NodePublishVolumeResponse nodePublishVolume(
       NodePublishVolumeRequest request) throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received nodePublishVolume call, request: {}",
-          request.toString());
-    }
+    LOG.debug("Received nodePublishVolume call, request: {}",
+        request);
     Csi.NodePublishVolumeRequest req = ProtoTranslatorFactory
         .getTranslator(NodePublishVolumeRequest.class,
             Csi.NodePublishVolumeRequest.class).convertTo(request);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Translate to CSI proto message: {}", req.toString());
-    }
+    LOG.debug("Translate to CSI proto message: {}", req);
     csiClient.nodePublishVolume(req);
     return NodePublishVolumeResponse.newInstance();
   }
@@ -116,16 +112,12 @@ public class DefaultCsiAdaptorImpl implements CsiAdaptorPlugin {
   @Override
   public NodeUnpublishVolumeResponse nodeUnpublishVolume(
       NodeUnpublishVolumeRequest request) throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received nodeUnpublishVolume call, request: {}",
-          request.toString());
-    }
+    LOG.debug("Received nodeUnpublishVolume call, request: {}",
+        request);
     Csi.NodeUnpublishVolumeRequest req = ProtoTranslatorFactory
         .getTranslator(NodeUnpublishVolumeRequest.class,
             Csi.NodeUnpublishVolumeRequest.class).convertTo(request);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Translate to CSI proto message: {}", req.toString());
-    }
+    LOG.debug("Translate to CSI proto message: {}", req);
     csiClient.nodeUnpublishVolume(req);
     return NodeUnpublishVolumeResponse.newInstance();
   }

+ 2 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java

@@ -275,9 +275,7 @@ public class AHSWebServices extends WebServices {
         try {
           nodeHttpAddress = getNMWebAddressFromRM(conf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
@@ -420,9 +418,7 @@ public class AHSWebServices extends WebServices {
         try {
           nodeHttpAddress = getNMWebAddressFromRM(conf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {

+ 8 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java

@@ -1424,9 +1424,7 @@ public class LeveldbTimelineStore extends AbstractService
 
       writeBatch = db.createWriteBatch();
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleting entity type:" + entityType + " id:" + entityId);
-      }
+      LOG.debug("Deleting entity type:{} id:{}", entityType, entityId);
       // remove start time from cache and db
       writeBatch.delete(createStartTimeLookupKey(entityId, entityType));
       EntityIdentifier entityIdentifier =
@@ -1452,11 +1450,8 @@ public class LeveldbTimelineStore extends AbstractService
           Object value = GenericObjectMapper.read(key, kp.getOffset());
           deleteKeysWithPrefix(writeBatch, addPrimaryFilterToKey(name, value,
               deletePrefix), pfIterator);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " primary filter entry " + name + " " +
-                value);
-          }
+          LOG.debug("Deleting entity type:{} id:{} primary filter entry {} {}",
+              entityType, entityId, name, value);
         } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
           kp = new KeyParser(key,
               prefixlen + RELATED_ENTITIES_COLUMN.length);
@@ -1471,11 +1466,9 @@ public class LeveldbTimelineStore extends AbstractService
           }
           writeBatch.delete(createReverseRelatedEntityKey(id, type,
               relatedEntityStartTime, entityId, entityType));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " from invisible reverse related entity " +
-                "entry of type:" + type + " id:" + id);
-          }
+          LOG.debug("Deleting entity type:{} id:{} from invisible reverse"
+              + " related entity entry of type:{} id:{}", entityType,
+              entityId, type, id);
         } else if (key[prefixlen] ==
             INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
           kp = new KeyParser(key, prefixlen +
@@ -1491,11 +1484,8 @@ public class LeveldbTimelineStore extends AbstractService
           }
           writeBatch.delete(createRelatedEntityKey(id, type,
               relatedEntityStartTime, entityId, entityType));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " from related entity entry of type:" +
-                type + " id:" + id);
-          }
+          LOG.debug("Deleting entity type:{} id:{} from related entity entry"
+              +" of type:{} id:{}", entityType, entityId, type, id);
         }
       }
       WriteOptions writeOptions = new WriteOptions();

+ 14 - 32
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java

@@ -413,9 +413,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       EnumSet<Field> fields) throws IOException {
     Long revStartTime = getStartTimeLong(entityId, entityType);
     if (revStartTime == null) {
-      if ( LOG.isDebugEnabled()) {
-        LOG.debug("Could not find start time for {} {} ", entityType, entityId);
-      }
+      LOG.debug("Could not find start time for {} {} ", entityType, entityId);
       return null;
     }
     byte[] prefix = KeyBuilder.newInstance().add(entityType)
@@ -424,9 +422,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
     DB db = entitydb.getDBForStartTime(revStartTime);
     if (db == null) {
-      if ( LOG.isDebugEnabled()) {
-        LOG.debug("Could not find db for {} {} ", entityType, entityId);
-      }
+      LOG.debug("Could not find db for {} {} ", entityType, entityId);
       return null;
     }
     try (DBIterator iterator = db.iterator()) {
@@ -1163,9 +1159,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
   @Override
   public TimelinePutResponse put(TimelineEntities entities) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Starting put");
-    }
+    LOG.debug("Starting put");
     TimelinePutResponse response = new TimelinePutResponse();
     TreeMap<Long, RollingWriteBatch> entityUpdates =
         new TreeMap<Long, RollingWriteBatch>();
@@ -1199,11 +1193,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         indexRollingWriteBatch.close();
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Put " + entityCount + " new leveldb entity entries and "
-          + indexCount + " new leveldb index entries from "
-          + entities.getEntities().size() + " timeline entities");
-    }
+    LOG.debug("Put {} new leveldb entity entries and {} new leveldb index"
+        + " entries from {} timeline entities", entityCount, indexCount,
+        entities.getEntities().size());
     return response;
   }
 
@@ -1521,16 +1513,11 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
           // a large delete will hold the lock for too long
           if (batchSize >= writeBatchSize) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Preparing to delete a batch of " + batchSize
-                  + " old start times");
-            }
+            LOG.debug("Preparing to delete a batch of {} old start times",
+                batchSize);
             starttimedb.write(writeBatch);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Deleted batch of " + batchSize
-                  + ". Total start times deleted so far this cycle: "
-                  + startTimesCount);
-            }
+            LOG.debug("Deleted batch of {}. Total start times deleted"
+                + " so far this cycle: {}", batchSize, startTimesCount);
             IOUtils.cleanupWithLogger(LOG, writeBatch);
             writeBatch = starttimedb.createWriteBatch();
             batchSize = 0;
@@ -1538,16 +1525,11 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
         ++totalCount;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Preparing to delete a batch of " + batchSize
-            + " old start times");
-      }
+      LOG.debug("Preparing to delete a batch of {} old start times",
+          batchSize);
       starttimedb.write(writeBatch);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleted batch of " + batchSize
-            + ". Total start times deleted so far this cycle: "
-            + startTimesCount);
-      }
+      LOG.debug("Deleted batch of {}. Total start times deleted so far"
+          + " this cycle: {}", batchSize, startTimesCount);
       LOG.info("Deleted " + startTimesCount + "/" + totalCount
           + " start time entities earlier than " + minStartTime);
     } finally {

+ 3 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java

@@ -127,12 +127,9 @@ public class TimelineACLsManager {
     String owner = aclExt.owner;
     AccessControlList domainACL = aclExt.acls.get(applicationAccessType);
     if (domainACL == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for access-type " + applicationAccessType
-            + " for domain " + entity.getDomainId() + " owned by "
-            + owner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
-      }
+      LOG.debug("ACL not found for access-type {} for domain {} owned by {}."
+          + " Using default [{}]", applicationAccessType,
+          entity.getDomainId(), owner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
       domainACL =
           new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
     }

+ 5 - 15
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java

@@ -139,9 +139,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
 
     @Override
     protected void storeNewMasterKey(DelegationKey key) throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing master key " + key.getKeyId());
-      }
+      LOG.debug("Storing master key {}", key.getKeyId());
       try {
         if (stateStore != null) {
           stateStore.storeTokenMasterKey(key);
@@ -153,9 +151,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
 
     @Override
     protected void removeStoredMasterKey(DelegationKey key) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing master key " + key.getKeyId());
-      }
+      LOG.debug("Removing master key {}", key.getKeyId());
       try {
         if (stateStore != null) {
           stateStore.removeTokenMasterKey(key);
@@ -168,9 +164,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void storeNewToken(TimelineDelegationTokenIdentifier tokenId,
         long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Storing token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.storeToken(tokenId, renewDate);
@@ -183,9 +177,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void removeStoredToken(TimelineDelegationTokenIdentifier tokenId)
         throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Storing token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.removeToken(tokenId);
@@ -198,9 +190,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void updateStoredToken(TimelineDelegationTokenIdentifier tokenId,
         long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Updating token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.updateToken(tokenId, renewDate);

+ 7 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java

@@ -105,11 +105,9 @@ public class AMHeartbeatRequestHandler extends Thread {
         if (request == null) {
           throw new YarnException("Null allocateRequest from requestInfo");
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sending Heartbeat to RM. AskList:"
-              + ((request.getAskList() == null) ? " empty"
-                  : request.getAskList().size()));
-        }
+        LOG.debug("Sending Heartbeat to RM. AskList:{}",
+            ((request.getAskList() == null) ? " empty" :
+            request.getAskList().size()));
 
         request.setResponseId(lastResponseId);
         AllocateResponse response = rmProxyRelayer.allocate(request);
@@ -125,20 +123,16 @@ public class AMHeartbeatRequestHandler extends Thread {
               userUgi, conf);
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Received Heartbeat reply from RM. Allocated Containers:"
-              + ((response.getAllocatedContainers() == null) ? " empty"
-                  : response.getAllocatedContainers().size()));
-        }
+        LOG.debug("Received Heartbeat reply from RM. Allocated Containers:{}",
+            ((response.getAllocatedContainers() == null) ? " empty"
+            : response.getAllocatedContainers().size()));
 
         if (requestInfo.getCallback() == null) {
           throw new YarnException("Null callback from requestInfo");
         }
         requestInfo.getCallback().callback(response);
       } catch (InterruptedException ex) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Interrupted while waiting for queue", ex);
-        }
+        LOG.debug("Interrupted while waiting for queue", ex);
       } catch (Throwable ex) {
         LOG.warn(
             "Error occurred while processing heart beat for " + applicationId,

+ 2 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java

@@ -265,11 +265,8 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
       // any cluster. Pick a random sub-cluster from active and enabled ones.
       targetId = getSubClusterForUnResolvedRequest(bookkeeper,
           rr.getAllocationRequestId());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ERROR resolving sub-cluster for resourceName: "
-            + rr.getResourceName() + ", picked a random subcluster to forward:"
-            + targetId);
-      }
+      LOG.debug("ERROR resolving sub-cluster for resourceName: {}, picked a "
+          + "random subcluster to forward:{}", rr.getResourceName(), targetId);
       if (targetIds != null && targetIds.size() > 0) {
         bookkeeper.addRackRR(targetId, rr);
       } else {

+ 6 - 12
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java

@@ -436,10 +436,8 @@ public class SQLFederationStateStore implements FederationStateStore {
             "SubCluster " + subClusterId.toString() + " does not exist";
         FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got the information about the specified SubCluster "
-            + subClusterInfo.toString());
-      }
+      LOG.debug("Got the information about the specified SubCluster {}",
+          subClusterInfo);
     } catch (SQLException e) {
       FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
@@ -700,10 +698,8 @@ public class SQLFederationStateStore implements FederationStateStore {
         FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got the information about the specified application  "
-            + request.getApplicationId() + ". The AM is running in " + homeRM);
-      }
+      LOG.debug("Got the information about the specified application {}."
+          + " The AM is running in {}", request.getApplicationId(), homeRM);
 
       FederationStateStoreClientMetrics
           .succeededStateStoreCall(stopTime - startTime);
@@ -852,10 +848,8 @@ public class SQLFederationStateStore implements FederationStateStore {
         subClusterPolicyConfiguration =
             SubClusterPolicyConfiguration.newInstance(request.getQueue(),
                 cstmt.getString(2), ByteBuffer.wrap(cstmt.getBytes(3)));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Selected from StateStore the policy for the queue: "
-              + subClusterPolicyConfiguration.toString());
-        }
+        LOG.debug("Selected from StateStore the policy for the queue: {}",
+            subClusterPolicyConfiguration);
       } else {
         LOG.warn("Policy for queue: {} does not exist.", request.getQueue());
         return null;

+ 3 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java

@@ -112,11 +112,9 @@ public class BaseContainerTokenSecretManager extends
   protected byte[] retrievePasswordInternal(ContainerTokenIdentifier identifier,
       MasterKeyData masterKey)
       throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Retrieving password for {} for user {} to be run on NM {}",
-          identifier.getContainerID(), identifier.getUser(),
-          identifier.getNmHostAddress());
-    }
+    LOG.debug("Retrieving password for {} for user {} to be run on NM {}",
+        identifier.getContainerID(), identifier.getUser(),
+        identifier.getNmHostAddress());
     return createPassword(identifier.getBytes(), masterKey.getSecretKey());
   }
 

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java

@@ -225,12 +225,12 @@ public class UnmanagedApplicationManager {
     this.heartbeatHandler.resetLastResponseId();
 
     for (Container container : response.getContainersFromPreviousAttempts()) {
-      LOG.debug("RegisterUAM returned existing running container "
-          + container.getId());
+      LOG.debug("RegisterUAM returned existing running container {}",
+          container.getId());
     }
     for (NMToken nmToken : response.getNMTokensFromPreviousAttempts()) {
-      LOG.debug("RegisterUAM returned existing NM token for node "
-          + nmToken.getNodeId());
+      LOG.debug("RegisterUAM returned existing NM token for node {}",
+          nmToken.getNodeId());
     }
     LOG.info(
         "RegisterUAM returned {} existing running container and {} NM tokens",

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java

@@ -153,7 +153,7 @@ public final class YarnServerSecurityUtils {
       credentials.readTokenStorageStream(buf);
       if (LOG.isDebugEnabled()) {
         for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
-          LOG.debug(tk.getService() + " = " + tk.toString());
+          LOG.debug("{}={}", tk.getService(), tk);
         }
       }
     }

+ 2 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java

@@ -179,9 +179,7 @@ import java.security.PrivilegedExceptionAction;
           nodeHttpAddress =
               LogWebServiceUtils.getNMWebAddressFromRM(yarnConf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
@@ -384,9 +382,7 @@ import java.security.PrivilegedExceptionAction;
           nodeHttpAddress =
               LogWebServiceUtils.getNMWebAddressFromRM(yarnConf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java

@@ -573,10 +573,8 @@ public class DefaultContainerExecutor extends ContainerExecutor {
     String user = ctx.getUser();
     String pid = ctx.getPid();
     Signal signal = ctx.getSignal();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending signal " + signal.getValue() + " to pid " + pid
-          + " as user " + user);
-    }
+    LOG.debug("Sending signal {} to pid {} as user {}",
+        signal.getValue(), pid, user);
     if (!containerIsAlive(pid)) {
       return false;
     }

+ 2 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java

@@ -85,11 +85,8 @@ public class DeletionService extends AbstractService {
 
   public void delete(DeletionTask deletionTask) {
     if (debugDelay != -1) {
-      if (LOG.isDebugEnabled()) {
-        String msg = String.format("Scheduling DeletionTask (delay %d) : %s",
-            debugDelay, deletionTask.toString());
-        LOG.debug(msg);
-      }
+      LOG.debug("Scheduling DeletionTask (delay {}) : {}", debugDelay,
+          deletionTask);
       recordDeletionTaskInStateStore(deletionTask);
       sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
     }

+ 3 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java

@@ -314,12 +314,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     try {
       resourceHandlerChain = ResourceHandlerModule
           .getConfiguredResourceHandlerChain(conf, nmContext);
-      if (LOG.isDebugEnabled()) {
-        final boolean enabled = resourceHandlerChain != null;
-        LOG.debug("Resource handler chain enabled = " + enabled);
-      }
+      LOG.debug("Resource handler chain enabled = {}",
+          (resourceHandlerChain != null));
       if (resourceHandlerChain != null) {
-        LOG.debug("Bootstrapping resource handler chain: " +
+        LOG.debug("Bootstrapping resource handler chain: {}",
             resourceHandlerChain);
         resourceHandlerChain.bootstrap(conf);
       }

+ 7 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java

@@ -200,11 +200,8 @@ public class NodeManager extends CompositeService
                 + e.getMessage(), e);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Distributed Node Attributes is enabled"
-          + " with provider class as : "
-          + attributesProvider.getClass().toString());
-    }
+    LOG.debug("Distributed Node Attributes is enabled with provider class"
+        + " as : {}", attributesProvider.getClass());
     return attributesProvider;
   }
 
@@ -238,10 +235,8 @@ public class NodeManager extends CompositeService
             "Failed to create NodeLabelsProvider : " + e.getMessage(), e);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Distributed Node Labels is enabled"
-          + " with provider class as : " + provider.getClass().toString());
-    }
+    LOG.debug("Distributed Node Labels is enabled"
+        + " with provider class as : {}", provider.getClass());
     return provider;
   }
 
@@ -617,14 +612,10 @@ public class NodeManager extends CompositeService
           && !ApplicationState.FINISHED.equals(app.getApplicationState())) {
         registeringCollectors.putIfAbsent(entry.getKey(), entry.getValue());
         AppCollectorData data = entry.getValue();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(entry.getKey() + " : " + data.getCollectorAddr() + "@<"
-              + data.getRMIdentifier() + ", " + data.getVersion() + ">");
-        }
+        LOG.debug("{} : {}@<{}, {}>", entry.getKey(), data.getCollectorAddr(),
+            data.getRMIdentifier(), data.getVersion());
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Remove collector data for done app " + entry.getKey());
-        }
+        LOG.debug("Remove collector data for done app {}", entry.getKey());
       }
     }
     knownCollectors.clear();

+ 21 - 43
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java

@@ -243,10 +243,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       LOG.error(message);
       throw new YarnException(message);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " :"
-        + durationToTrackStoppedContainers);
-    }
+    LOG.debug("{} :{}", YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
+        durationToTrackStoppedContainers);
     super.serviceInit(conf);
     LOG.info("Initialized nodemanager with :" +
         " physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb +
@@ -406,10 +404,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         List<LogAggregationReport> logAggregationReports =
             context.getNMLogAggregationStatusTracker()
                 .pullCachedLogAggregationReports();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The cache log aggregation status size:"
-              + logAggregationReports.size());
-        }
+        LOG.debug("The cache log aggregation status size:{}",
+            logAggregationReports.size());
         if (logAggregationReports != null
             && !logAggregationReports.isEmpty()) {
           request.setLogAggregationReportsForApps(logAggregationReports);
@@ -519,10 +515,9 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     nodeHealthStatus.setIsNodeHealthy(healthChecker.isHealthy());
     nodeHealthStatus.setLastHealthReportTime(healthChecker
       .getLastHealthReportTime());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Node's health-status : " + nodeHealthStatus.getIsNodeHealthy()
-          + ", " + nodeHealthStatus.getHealthReport());
-    }
+    LOG.debug("Node's health-status : {}, {}",
+        nodeHealthStatus.getIsNodeHealthy(),
+        nodeHealthStatus.getHealthReport());
     List<ContainerStatus> containersStatuses = getContainerStatuses();
     ResourceUtilization containersUtilization = getContainersUtilization();
     ResourceUtilization nodeUtilization = getNodeUtilization();
@@ -603,10 +598,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           container.cloneAndGetContainerStatus();
       if (containerStatus.getState() == ContainerState.COMPLETE) {
         if (isApplicationStopped(applicationId)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(applicationId + " is completing, " + " remove "
-                + containerId + " from NM context.");
-          }
+          LOG.debug("{} is completing, remove {} from NM context.",
+              applicationId, containerId);
           context.getContainers().remove(containerId);
           pendingCompletedContainers.put(containerId, containerStatus);
         } else {
@@ -624,11 +617,9 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     }
 
     containerStatuses.addAll(pendingCompletedContainers.values());
+    LOG.debug("Sending out {} container statuses: {}",
+        containerStatuses.size(), containerStatuses);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending out " + containerStatuses.size()
-          + " container statuses: " + containerStatuses);
-    }
     return containerStatuses;
   }
 
@@ -815,8 +806,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     }
     if (LOG.isDebugEnabled()) {
       for (Map.Entry<ApplicationId, Credentials> entry : map.entrySet()) {
-        LOG.debug("Retrieved credentials form RM for " + entry.getKey() + ": "
-            + entry.getValue().getAllTokens());
+        LOG.debug("Retrieved credentials form RM for {}: {}",
+            entry.getKey(), entry.getValue().getAllTokens());
       }
     }
     return map;
@@ -1126,10 +1117,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         NodeHeartbeatResponse response) {
       if (isValueSented()) {
         if (response.getAreNodeAttributesAcceptedByRM()) {
-          if(LOG.isDebugEnabled()){
-            LOG.debug("Node attributes {" + getPreviousValue()
-                + "} were Accepted by RM ");
-          }
+          LOG.debug("Node attributes {{}} were Accepted by RM ",
+              getPreviousValue());
         } else {
           // case where updated node attributes from NodeAttributesProvider
           // is sent to RM and RM rejected the attributes
@@ -1279,11 +1268,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         NodeHeartbeatResponse response) {
       if (isValueSented()) {
         if (response.getAreNodeLabelsAcceptedByRM()) {
-          if(LOG.isDebugEnabled()){
-            LOG.debug(
-                "Node Labels {" + StringUtils.join(",", getPreviousValue())
-                    + "} were Accepted by RM ");
-          }
+          LOG.debug("Node Labels {{}} were Accepted by RM",
+              StringUtils.join(",", getPreviousValue()));
         } else {
           // case where updated labels from NodeLabelsProvider is sent to RM and
           // RM rejected the labels
@@ -1410,10 +1396,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           Resource newResource = response.getResource();
           if (newResource != null) {
             updateNMResource(newResource);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Node's resource is updated to " +
-                  newResource.toString());
-            }
+            LOG.debug("Node's resource is updated to {}", newResource);
           }
           if (timelineServiceV2Enabled) {
             updateTimelineCollectorData(response);
@@ -1453,9 +1436,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       Map<ApplicationId, AppCollectorData> incomingCollectorsMap =
           response.getAppCollectors();
       if (incomingCollectorsMap == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("No collectors to update RM");
-        }
+        LOG.debug("No collectors to update RM");
         return;
       }
       Map<ApplicationId, AppCollectorData> knownCollectors =
@@ -1472,11 +1453,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           // the known data (updates the known data).
           AppCollectorData existingData = knownCollectors.get(appId);
           if (AppCollectorData.happensBefore(existingData, collectorData)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Sync a new collector address: "
-                  + collectorData.getCollectorAddr()
-                  + " for application: " + appId + " from RM.");
-            }
+            LOG.debug("Sync a new collector address: {} for application: {}"
+                + " from RM.", collectorData.getCollectorAddr(), appId);
             // Update information for clients.
             NMTimelinePublisher nmTimelinePublisher =
                 context.getNMTimelinePublisher();

+ 4 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java

@@ -247,11 +247,11 @@ public class AMRMProxyService extends CompositeService implements
         // Retrieve the AM container credentials from NM context
         Credentials amCred = null;
         for (Container container : this.nmContext.getContainers().values()) {
-          LOG.debug("From NM Context container " + container.getContainerId());
+          LOG.debug("From NM Context container {}", container.getContainerId());
           if (container.getContainerId().getApplicationAttemptId().equals(
               attemptId) && container.getContainerTokenIdentifier() != null) {
-            LOG.debug("Container type "
-                + container.getContainerTokenIdentifier().getContainerType());
+            LOG.debug("Container type {}",
+                container.getContainerTokenIdentifier().getContainerType());
             if (container.getContainerTokenIdentifier()
                 .getContainerType() == ContainerType.APPLICATION_MASTER) {
               LOG.info("AM container {} found in context, has credentials: {}",
@@ -764,9 +764,7 @@ public class AMRMProxyService extends CompositeService implements
           AMRMProxyService.this.stopApplication(event.getApplicationID());
           break;
         default:
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("AMRMProxy is ignoring event: " + event.getType());
-          }
+          LOG.debug("AMRMProxy is ignoring event: {}", event.getType());
           break;
         }
       } else {

+ 1 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java

@@ -248,10 +248,7 @@ public class AMRMProxyTokenSecretManager extends
     try {
       ApplicationAttemptId applicationAttemptId =
           identifier.getApplicationAttemptId();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to retrieve password for "
-            + applicationAttemptId);
-      }
+      LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
       if (!appAttemptSet.contains(applicationAttemptId)) {
         throw new InvalidToken(applicationAttemptId
             + " not found in AMRMProxyTokenSecretManager.");

+ 3 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java

@@ -129,9 +129,7 @@ public final class DefaultRequestInterceptor extends
   @Override
   public AllocateResponse allocate(final AllocateRequest request)
       throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocate request to the real YARN RM");
-    }
+    LOG.debug("Forwarding allocate request to the real YARN RM");
     AllocateResponse allocateResponse = rmClient.allocate(request);
     if (allocateResponse.getAMRMToken() != null) {
       YarnServerSecurityUtils.updateAMRMToken(allocateResponse.getAMRMToken(),
@@ -161,10 +159,8 @@ public final class DefaultRequestInterceptor extends
   public DistributedSchedulingAllocateResponse allocateForDistributedScheduling(
       DistributedSchedulingAllocateRequest request)
       throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocateForDistributedScheduling request" +
-          "to the real YARN RM");
-    }
+    LOG.debug("Forwarding allocateForDistributedScheduling request"
+        + " to the real YARN RM");
     if (getApplicationContext().getNMCotext()
         .isDistributedSchedulingEnabled()) {
       DistributedSchedulingAllocateResponse allocateResponse =

+ 7 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java

@@ -401,7 +401,7 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
             amrmToken.decodeFromUrlString(
                 new String(entry.getValue(), STRING_TO_BYTE_FORMAT));
             uamMap.put(scId, amrmToken);
-            LOG.debug("Recovered UAM in " + scId + " from NMSS");
+            LOG.debug("Recovered UAM in {} from NMSS", scId);
           }
         }
         LOG.info("Found {} existing UAMs for application {} in NMStateStore",
@@ -443,8 +443,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
               .getContainersFromPreviousAttempts()) {
             containerIdToSubClusterIdMap.put(container.getId(), subClusterId);
             containers++;
-            LOG.debug("  From subcluster " + subClusterId
-                + " running container " + container.getId());
+            LOG.debug("  From subcluster {} running container {}",
+                subClusterId, container.getId());
           }
           LOG.info("Recovered {} running containers from UAM in {}",
               response.getContainersFromPreviousAttempts().size(),
@@ -471,8 +471,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         containerIdToSubClusterIdMap.put(container.getContainerId(),
             this.homeSubClusterId);
         containers++;
-        LOG.debug("  From home RM " + this.homeSubClusterId
-            + " running container " + container.getContainerId());
+        LOG.debug("  From home RM {} running container {}",
+            this.homeSubClusterId, container.getContainerId());
       }
       LOG.info("{} running containers including AM recovered from home RM {}",
           response.getContainerList().size(), this.homeSubClusterId);
@@ -797,10 +797,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         try {
           Future<FinishApplicationMasterResponseInfo> future = compSvc.take();
           FinishApplicationMasterResponseInfo uamResponse = future.get();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Received finish application response from RM: "
-                + uamResponse.getSubClusterId());
-          }
+          LOG.debug("Received finish application response from RM: {}",
+              uamResponse.getSubClusterId());
           if (uamResponse.getResponse() == null
               || !uamResponse.getResponse().getIsUnregistered()) {
             failedToUnRegister = true;

+ 3 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java

@@ -52,22 +52,16 @@ public final class NMProtoUtils {
     int taskId = proto.getId();
     if (proto.hasTaskType() && proto.getTaskType() != null) {
       if (proto.getTaskType().equals(DeletionTaskType.FILE.name())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Converting recovered FileDeletionTask");
-        }
+        LOG.debug("Converting recovered FileDeletionTask");
         return convertProtoToFileDeletionTask(proto, deletionService, taskId);
       } else if (proto.getTaskType().equals(
           DeletionTaskType.DOCKER_CONTAINER.name())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Converting recovered DockerContainerDeletionTask");
-        }
+        LOG.debug("Converting recovered DockerContainerDeletionTask");
         return convertProtoToDockerContainerDeletionTask(proto, deletionService,
             taskId);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Unable to get task type, trying FileDeletionTask");
-    }
+    LOG.debug("Unable to get task type, trying FileDeletionTask");
     return convertProtoToFileDeletionTask(proto, deletionService, taskId);
   }
 

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java

@@ -638,9 +638,7 @@ public class AuxServices extends AbstractService
             .getName());
         loadedAuxServices.add(service.getName());
         if (existingService != null && existingService.equals(service)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Auxiliary service already loaded: " + service.getName());
-          }
+          LOG.debug("Auxiliary service already loaded: {}", service.getName());
           continue;
         }
         foundChanges = true;

+ 9 - 20
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java

@@ -368,9 +368,7 @@ public class ContainerManagerImpl extends CompositeService implements
                appsState.getIterator()) {
         while (rasIterator.hasNext()) {
           ContainerManagerApplicationProto proto = rasIterator.next();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Recovering application with state: " + proto.toString());
-          }
+          LOG.debug("Recovering application with state: {}", proto);
           recoverApplication(proto);
         }
       }
@@ -379,9 +377,7 @@ public class ContainerManagerImpl extends CompositeService implements
                stateStore.getContainerStateIterator()) {
         while (rcsIterator.hasNext()) {
           RecoveredContainerState rcs = rcsIterator.next();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Recovering container with state: " + rcs);
-          }
+          LOG.debug("Recovering container with state: {}", rcs);
           recoverContainer(rcs);
         }
       }
@@ -428,20 +424,16 @@ public class ContainerManagerImpl extends CompositeService implements
       FlowContextProto fcp = p.getFlowContext();
       fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
           fcp.getFlowRunId());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Recovering Flow context: " + fc + " for an application " + appId);
-      }
+      LOG.debug(
+          "Recovering Flow context: {} for an application {}", fc, appId);
     } else {
       // in upgrade situations, where there is no prior existing flow context,
       // default would be used.
       fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
           YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "No prior existing flow context found. Using default Flow context: "
-                + fc + " for an application " + appId);
-      }
+      LOG.debug(
+          "No prior existing flow context found. Using default Flow context: "
+          + "{} for an application {}", fc, appId);
     }
 
     LOG.info("Recovering application " + appId);
@@ -1206,11 +1198,8 @@ public class ContainerManagerImpl extends CompositeService implements
         flowRunId = Long.parseLong(flowRunIdStr);
       }
       flowContext = new FlowContext(flowName, flowVersion, flowRunId);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Flow context: " + flowContext + " created for an application "
-                + applicationID);
-      }
+      LOG.debug("Flow context: {} created for an application {}",
+          flowContext, applicationID);
     }
     return flowContext;
   }

+ 1 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java

@@ -639,10 +639,7 @@ public class ApplicationImpl implements Application {
 
     try {
       ApplicationId applicationID = event.getApplicationID();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Processing " + applicationID + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", applicationID, event.getType());
       ApplicationState oldState = stateMachine.getCurrentState();
       ApplicationState newState = null;
       try {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java

@@ -2110,9 +2110,7 @@ public class ContainerImpl implements Container {
     this.writeLock.lock();
     try {
       ContainerId containerID = event.getContainerID();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing " + containerID + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", containerID, event.getType());
       ContainerState oldState = stateMachine.getCurrentState();
       ContainerState newState = null;
       try {

+ 1 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java

@@ -52,10 +52,7 @@ public class DockerContainerDeletionTask extends DeletionTask
    */
   @Override
   public void run() {
-    if (LOG.isDebugEnabled()) {
-      String msg = String.format("Running DeletionTask : %s", toString());
-      LOG.debug(msg);
-    }
+    LOG.debug("Running DeletionTask : {}", this);
     LinuxContainerExecutor exec = ((LinuxContainerExecutor)
         getDeletionService().getContainerExecutor());
     exec.removeDockerContainer(containerId);

+ 4 - 14
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java

@@ -95,16 +95,11 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
    */
   @Override
   public void run() {
-    if (LOG.isDebugEnabled()) {
-      String msg = String.format("Running DeletionTask : %s", toString());
-      LOG.debug(msg);
-    }
+    LOG.debug("Running DeletionTask : {}", this);
     boolean error = false;
     if (null == getUser()) {
       if (baseDirs == null || baseDirs.size() == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("NM deleting absolute path : " + subDir);
-        }
+        LOG.debug("NM deleting absolute path : {}", subDir);
         try {
           lfs.delete(subDir, true);
         } catch (IOException e) {
@@ -114,9 +109,7 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
       } else {
         for (Path baseDir : baseDirs) {
           Path del = subDir == null? baseDir : new Path(baseDir, subDir);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("NM deleting path : " + del);
-          }
+          LOG.debug("NM deleting path : {}", del);
           try {
             lfs.delete(del, true);
           } catch (IOException e) {
@@ -127,10 +120,7 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
       }
     } else {
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Deleting path: [" + subDir + "] as user: [" + getUser() + "]");
-        }
+        LOG.debug("Deleting path: [{}] as user: [{}]", subDir, getUser());
         if (baseDirs == null || baseDirs.size() == 0) {
           getDeletionService().getContainerExecutor().deleteAsUser(
               new DeletionAsUserContext.Builder()

+ 12 - 22
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java

@@ -102,19 +102,14 @@ public class ContainerCleanup implements Runnable {
           + " No cleanup needed to be done");
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Marking container " + containerIdStr + " as inactive");
-    }
+    LOG.debug("Marking container {} as inactive", containerIdStr);
     // this should ensure that if the container process has not launched
     // by this time, it will never be launched
     exec.deactivateContainer(containerId);
     Path pidFilePath = launch.getPidFilePath();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Getting pid for container {} to kill"
-              + " from pid file {}", containerIdStr, pidFilePath != null ?
-          pidFilePath : "null");
-    }
-
+    LOG.debug("Getting pid for container {} to kill"
+        + " from pid file {}", containerIdStr, pidFilePath != null ?
+        pidFilePath : "null");
     // however the container process may have already started
     try {
 
@@ -194,20 +189,17 @@ public class ContainerCleanup implements Runnable {
 
   private void signalProcess(String processId, String user,
       String containerIdStr) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending signal to pid " + processId + " as user " + user
-          + " for container " + containerIdStr);
-    }
+    LOG.debug("Sending signal to pid {} as user {} for container {}",
+        processId, user, containerIdStr);
     final ContainerExecutor.Signal signal =
         sleepDelayBeforeSigKill > 0 ? ContainerExecutor.Signal.TERM :
             ContainerExecutor.Signal.KILL;
 
     boolean result = sendSignal(user, processId, signal);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sent signal " + signal + " to pid " + processId + " as user "
-          + user + " for container " + containerIdStr + ", result="
-          + (result ? "success" : "failed"));
-    }
+    LOG.debug("Sent signal {} to pid {} as user {} for container {},"
+        + " result={}", signal, processId, user, containerIdStr,
+        (result ? "success" : "failed"));
+
     if (sleepDelayBeforeSigKill > 0) {
       new ContainerExecutor.DelayedProcessKiller(container, user, processId,
           sleepDelayBeforeSigKill, ContainerExecutor.Signal.KILL, exec).start();
@@ -232,9 +224,7 @@ public class ContainerCleanup implements Runnable {
             .setContainer(container)
             .setUser(container.getUser())
             .build());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sent signal to docker container " + containerIdStr
-          + " as user " + user + ", result=" + (result ? "success" : "failed"));
-    }
+    LOG.debug("Sent signal to docker container {} as user {}, result={}",
+        containerIdStr, user, (result ? "success" : "failed"));
   }
 }

+ 10 - 23
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java

@@ -647,11 +647,8 @@ public class ContainerLaunch implements Callable<Integer> {
 
   protected void handleContainerExitCode(int exitCode, Path containerLogDir) {
     ContainerId containerId = container.getContainerId();
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Container " + containerId + " completed with exit code "
-          + exitCode);
-    }
+    LOG.debug("Container {} completed with exit code {}", containerId,
+        exitCode);
 
     StringBuilder diagnosticInfo =
         new StringBuilder("Container exited with a non-zero exit code ");
@@ -840,22 +837,17 @@ public class ContainerLaunch implements Callable<Integer> {
       return;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Getting pid for container " + containerIdStr
-          + " to send signal to from pid file "
-          + (pidFilePath != null ? pidFilePath.toString() : "null"));
-    }
+    LOG.debug("Getting pid for container {} to send signal to from pid"
+        + " file {}", containerIdStr,
+        (pidFilePath != null ? pidFilePath.toString() : "null"));
 
     try {
       // get process id from pid file if available
       // else if shell is still active, get it from the shell
       String processId = getContainerPid();
       if (processId != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sending signal to pid " + processId
-              + " as user " + user
-              + " for container " + containerIdStr);
-        }
+        LOG.debug("Sending signal to pid {} as user {} for container {}",
+            processId, user, containerIdStr);
 
         boolean result = exec.signalContainer(
             new ContainerSignalContext.Builder()
@@ -1013,10 +1005,8 @@ public class ContainerLaunch implements Callable<Integer> {
     String containerIdStr = 
         container.getContainerId().toString();
     String processId;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Accessing pid for container " + containerIdStr
-          + " from pid file " + pidFilePath);
-    }
+    LOG.debug("Accessing pid for container {} from pid file {}",
+        containerIdStr, pidFilePath);
     int sleepCounter = 0;
     final int sleepInterval = 100;
 
@@ -1025,10 +1015,7 @@ public class ContainerLaunch implements Callable<Integer> {
     while (true) {
       processId = ProcessIdFileReader.getProcessId(pidFilePath);
       if (processId != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Got pid " + processId + " for container " + containerIdStr);
-        }
+        LOG.debug("Got pid {} for container {}", processId, containerIdStr);
         break;
       }
       else if ((sleepCounter*sleepInterval) > maxKillWaitTime) {

+ 3 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java

@@ -464,10 +464,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   public String createCGroup(CGroupController controller, String cGroupId)
       throws ResourceHandlerException {
     String path = getPathForCGroup(controller, cGroupId);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("createCgroup: " + path);
-    }
+    LOG.debug("createCgroup: {}", path);
 
     if (!new File(path).mkdir()) {
       throw new ResourceHandlerException("Failed to create cgroup at " + path);
@@ -487,7 +484,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
               + "/tasks"), "UTF-8"))) {
         str = inl.readLine();
         if (str != null) {
-          LOG.debug("First line in cgroup tasks file: " + cgf + " " + str);
+          LOG.debug("First line in cgroup tasks file: {} {}", cgf, str);
         }
       } catch (IOException e) {
         LOG.warn("Failed to read cgroup tasks file. ", e);
@@ -537,9 +534,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
     boolean deleted = false;
     String cGroupPath = getPathForCGroup(controller, cGroupId);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("deleteCGroup: " + cGroupPath);
-    }
+    LOG.debug("deleteCGroup: {}", cGroupPath);
 
     long start = clock.getTime();
 

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java

@@ -153,9 +153,7 @@ public class NetworkPacketTaggingHandlerImpl
   @Override
   public List<PrivilegedOperation> teardown()
       throws ResourceHandlerException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("teardown(): Nothing to do");
-    }
+    LOG.debug("teardown(): Nothing to do");
 
     return null;
   }

+ 5 - 11
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java

@@ -84,9 +84,7 @@ public class ResourceHandlerModule {
         if (cGroupsHandler == null) {
           cGroupsHandler = new CGroupsHandlerImpl(conf,
               PrivilegedOperationExecutor.getInstance(conf));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Value of CGroupsHandler is: " + cGroupsHandler);
-          }
+          LOG.debug("Value of CGroupsHandler is: {}", cGroupsHandler);
         }
       }
     }
@@ -318,16 +316,12 @@ public class ResourceHandlerModule {
 
     Map<String, ResourcePlugin> pluginMap = pluginManager.getNameToPlugins();
     if (pluginMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("List of plugins of ResourcePluginManager was empty " +
-            "while trying to add ResourceHandlers from configuration!");
-      }
+      LOG.debug("List of plugins of ResourcePluginManager was empty " +
+          "while trying to add ResourceHandlers from configuration!");
       return;
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("List of plugins of ResourcePluginManager: " +
-            pluginManager.getNameToPlugins());
-      }
+      LOG.debug("List of plugins of ResourcePluginManager: {}",
+          pluginManager.getNameToPlugins());
     }
 
     for (ResourcePlugin plugin : pluginMap.values()) {

+ 3 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java

@@ -185,10 +185,8 @@ public class TrafficControlBandwidthHandlerImpl
       throws ResourceHandlerException {
     String containerIdStr = containerId.toString();
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Attempting to reacquire classId for container: " +
-          containerIdStr);
-    }
+    LOG.debug("Attempting to reacquire classId for container: {}",
+        containerIdStr);
 
     String classIdStrFromFile = cGroupsHandler.getCGroupParam(
         CGroupsHandler.CGroupController.NET_CLS, containerIdStr,
@@ -277,9 +275,7 @@ public class TrafficControlBandwidthHandlerImpl
   @Override
   public List<PrivilegedOperation> teardown()
       throws ResourceHandlerException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("teardown(): Nothing to do");
-    }
+    LOG.debug("teardown(): Nothing to do");
 
     return null;
   }

+ 5 - 15
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java

@@ -222,9 +222,7 @@ import java.util.regex.Pattern;
       Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE);
 
       if (pattern.matcher(state).find()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Matched regex: " + regex);
-        }
+        LOG.debug("Matched regex: {}", regex);
       } else {
         String logLine = new StringBuffer("Failed to match regex: ")
               .append(regex).append(" Current state: ").append(state).toString();
@@ -258,9 +256,7 @@ import java.util.regex.Pattern;
       String output =
           privilegedOperationExecutor.executePrivilegedOperation(op, true);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("TC state: %n" + output);
-      }
+      LOG.debug("TC state: {}", output);
 
       return output;
     } catch (PrivilegedOperationException e) {
@@ -332,15 +328,11 @@ import java.util.regex.Pattern;
       String output =
           privilegedOperationExecutor.executePrivilegedOperation(op, true);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("TC stats output:" + output);
-      }
+      LOG.debug("TC stats output:{}", output);
 
       Map<Integer, Integer> classIdBytesStats = parseStatsString(output);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("classId -> bytes sent %n" + classIdBytesStats);
-      }
+      LOG.debug("classId -> bytes sent {}", classIdBytesStats);
 
       return classIdBytesStats;
     } catch (PrivilegedOperationException e) {
@@ -467,9 +459,7 @@ import java.util.regex.Pattern;
     //e.g 4325381 -> 00420005
     String classIdStr = String.format("%08x", Integer.parseInt(input));
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ClassId hex string : " + classIdStr);
-    }
+    LOG.debug("ClassId hex string : {}", classIdStr);
 
     //extract and return 4 digits
     //e.g 00420005 -> 0005

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java

@@ -129,10 +129,8 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Using container runtime: " + runtime.getClass()
+    LOG.debug("Using container runtime: {}", runtime.getClass()
           .getSimpleName());
-    }
 
     return runtime;
   }

+ 13 - 27
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java

@@ -511,11 +511,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
             + ", please check error message in log to understand "
             + "why this happens.";
     LOG.error(message);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("All docker volumes in the system, command="
-          + dockerVolumeInspectCommand.toString());
-    }
+    LOG.debug("All docker volumes in the system, command={}",
+        dockerVolumeInspectCommand);
 
     throw new ContainerExecutionException(message);
   }
@@ -630,30 +627,22 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   protected void addCGroupParentIfRequired(String resourcesOptions,
       String containerIdStr, DockerRunCommand runCommand) {
     if (cGroupsHandler == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to"
+      LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to"
             + " do.");
-      }
       return;
     }
 
     if (resourcesOptions.equals(PrivilegedOperation.CGROUP_ARG_PREFIX
             + PrivilegedOperation.CGROUP_ARG_NO_TASKS)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("no resource restrictions specified. not using docker's "
-            + "cgroup options");
-      }
+      LOG.debug("no resource restrictions specified. not using docker's "
+          + "cgroup options");
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("using docker's cgroups options");
-      }
+      LOG.debug("using docker's cgroups options");
 
       String cGroupPath = "/"
           + cGroupsHandler.getRelativePathForCGroup(containerIdStr);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("using cgroup parent: " + cGroupPath);
-      }
+      LOG.debug("using cgroup parent: {}", cGroupPath);
 
       runCommand.setCGroupParent(cGroupPath);
     }
@@ -1368,9 +1357,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     if (tcCommandFile != null) {
       launchOp.appendArgs(tcCommandFile);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Launching container with cmd: " + command);
-    }
+    LOG.debug("Launching container with cmd: {}", command);
 
     return launchOp;
   }
@@ -1391,8 +1378,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     long start = System.currentTimeMillis();
     DockerPullCommand dockerPullCommand = new DockerPullCommand(imageName);
-    LOG.debug("now pulling docker image." + " image name: " + imageName + ","
-        + " container: " + containerIdStr);
+    LOG.debug("now pulling docker image. image name: {}, container: {}",
+        imageName, containerIdStr);
 
     DockerCommandExecutor.executeDockerCommand(dockerPullCommand,
         containerIdStr, null,
@@ -1400,10 +1387,9 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     long end = System.currentTimeMillis();
     long pullImageTimeMs = end - start;
-    LOG.debug("pull docker image done with "
-        + String.valueOf(pullImageTimeMs) + "ms spent."
-        + " image name: " + imageName + ","
-        + " container: " + containerIdStr);
+
+    LOG.debug("pull docker image done with {}ms spent. image name: {},"
+        + " container: {}", pullImageTimeMs, imageName, containerIdStr);
   }
 
   private void executeLivelinessCheck(ContainerRuntimeContext ctx)

+ 7 - 12
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java

@@ -83,9 +83,8 @@ public final class DockerCommandExecutor {
     if (disableFailureLogging) {
       dockerOp.disableFailureLogging();
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Running docker command: " + dockerCommand);
-    }
+    LOG.debug("Running docker command: {}", dockerCommand);
+
     try {
       String result = privilegedOperationExecutor
           .executePrivilegedOperation(null, dockerOp, null,
@@ -118,17 +117,13 @@ public final class DockerCommandExecutor {
           privilegedOperationExecutor, nmContext);
       DockerContainerStatus dockerContainerStatus = parseContainerStatus(
           currentContainerStatus);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: " + dockerContainerStatus.getName()
-            + " ContainerId: " + containerId);
-      }
+      LOG.debug("Container Status: {} ContainerId: {}",
+          dockerContainerStatus.getName(), containerId);
+
       return dockerContainerStatus;
     } catch (ContainerExecutionException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: "
-            + DockerContainerStatus.NONEXISTENT.getName()
-            + " ContainerId: " + containerId);
-      }
+      LOG.debug("Container Status: {} ContainerId: {}",
+          DockerContainerStatus.NONEXISTENT.getName(), containerId);
       return DockerContainerStatus.NONEXISTENT;
     }
   }

+ 4 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java

@@ -190,9 +190,7 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
     this.writeLock.lock();
     try {
       Path resourcePath = event.getLocalResourceRequest().getPath();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing " + resourcePath + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", resourcePath, event.getType());
       ResourceState oldState = this.stateMachine.getCurrentState();
       ResourceState newState = null;
       try {
@@ -201,11 +199,9 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
         LOG.warn("Can't handle this event at current state", e);
       }
       if (newState != null && oldState != newState) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Resource " + resourcePath + (localPath != null ?
-              "(->" + localPath + ")": "") + " size : " + getSize()
-              + " transitioned from " + oldState + " to " + newState);
-        }
+        LOG.debug("Resource {}{} size : {} transitioned from {} to {}",
+            resourcePath, (localPath != null ? "(->" + localPath + ")": ""),
+            getSize(), oldState, newState);
       }
     } finally {
       this.writeLock.unlock();

+ 14 - 23
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java

@@ -345,10 +345,8 @@ public class ResourceLocalizationService extends CompositeService
         LocalizedResourceProto proto = it.next();
         LocalResource rsrc = new LocalResourcePBImpl(proto.getResource());
         LocalResourceRequest req = new LocalResourceRequest(rsrc);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Recovering localized resource " + req + " at "
-              + proto.getLocalPath());
-        }
+        LOG.debug("Recovering localized resource {} at {}",
+            req, proto.getLocalPath());
         tracker.handle(new ResourceRecoveredEvent(req,
             new Path(proto.getLocalPath()), proto.getSize()));
       }
@@ -514,10 +512,8 @@ public class ResourceLocalizationService extends CompositeService
                   .getApplicationId());
       for (LocalResourceRequest req : e.getValue()) {
         tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Localizing " + req.getPath() +
-              " for container " + c.getContainerId());
-        }
+        LOG.debug("Localizing {} for container {}",
+            req.getPath(), c.getContainerId());
       }
     }
   }
@@ -930,17 +926,13 @@ public class ResourceLocalizationService extends CompositeService
                 + " Either queue is full or threadpool is shutdown.", re);
           }
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Skip downloading resource: " + key + " since it's in"
-                + " state: " + rsrc.getState());
-          }
+          LOG.debug("Skip downloading resource: {} since it's in"
+                + " state: {}", key, rsrc.getState());
           rsrc.unlock();
         }
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skip downloading resource: " + key + " since it is locked"
-              + " by other threads");
-        }
+        LOG.debug("Skip downloading resource: {} since it is locked"
+              + " by other threads", key);
       }
     }
 
@@ -1302,10 +1294,10 @@ public class ResourceLocalizationService extends CompositeService
       if (systemCredentials == null) {
         return null;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding new framework-token for " + appId
-            + " for localization: " + systemCredentials.getAllTokens());
-      }
+
+      LOG.debug("Adding new framework-token for {} for localization: {}",
+          appId, systemCredentials.getAllTokens());
+
       return systemCredentials;
     }
     
@@ -1328,11 +1320,10 @@ public class ResourceLocalizationService extends CompositeService
         LOG.info("Writing credentials to the nmPrivate file "
             + nmPrivateCTokensPath.toString());
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Credentials list in " + nmPrivateCTokensPath.toString()
-              + ": ");
+          LOG.debug("Credentials list in {}: ", nmPrivateCTokensPath);
           for (Token<? extends TokenIdentifier> tk : credentials
               .getAllTokens()) {
-            LOG.debug(tk + " : " + buildTokenFingerprint(tk));
+            LOG.debug("{} : {}", tk, buildTokenFingerprint(tk));
           }
         }
         if (UserGroupInformation.isSecurityEnabled()) {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java

@@ -41,9 +41,7 @@ public class LocalizerTokenSelector implements
     LOG.debug("Using localizerTokenSelector.");
 
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Token of kind " + token.getKind() + " is found");
-      }
+      LOG.debug("Token of kind {} is found", token.getKind());
       if (LocalizerTokenIdentifier.KIND.equals(token.getKind())) {
         return (Token<LocalizerTokenIdentifier>) token;
       }

+ 3 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java

@@ -383,11 +383,9 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       Credentials systemCredentials =
           context.getSystemCredentialsForApps().get(appId);
       if (systemCredentials != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding new framework-token for " + appId
-              + " for log-aggregation: " + systemCredentials.getAllTokens()
-              + "; userUgi=" + userUgi);
-        }
+        LOG.debug("Adding new framework-token for {} for log-aggregation:"
+            + " {}; userUgi={}", appId, systemCredentials.getAllTokens(),
+            userUgi);
         // this will replace old token
         userUgi.addCredentials(systemCredentials);
       }

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java

@@ -132,10 +132,8 @@ public class NonAggregatingLogHandler extends AbstractService implements
         ApplicationId appId = entry.getKey();
         LogDeleterProto proto = entry.getValue();
         long deleteDelayMsec = proto.getDeletionTime() - now;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Scheduling deletion of " + appId + " logs in "
-              + deleteDelayMsec + " msec");
-        }
+        LOG.debug("Scheduling deletion of {} logs in {} msec", appId,
+            deleteDelayMsec);
         LogDeleterRunnable logDeleter =
             new LogDeleterRunnable(proto.getUser(), appId);
         try {

+ 10 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java

@@ -468,8 +468,8 @@ public class ContainersMonitorImpl extends AbstractService implements
             tmp.append(p.getPID());
             tmp.append(" ");
           }
-          LOG.debug("Current ProcessTree list : "
-              + tmp.substring(0, tmp.length()) + "]");
+          LOG.debug("Current ProcessTree list : {}",
+              tmp.substring(0, tmp.length()) + "]");
         }
 
         // Temporary structure to calculate the total resource utilization of
@@ -495,10 +495,8 @@ public class ContainersMonitorImpl extends AbstractService implements
             if (pId == null || !isResourceCalculatorAvailable()) {
               continue; // processTree cannot be tracked
             }
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Constructing ProcessTree for : PID = " + pId
-                  + " ContainerId = " + containerId);
-            }
+            LOG.debug("Constructing ProcessTree for : PID = {}"
+                + " ContainerId = {}", pId, containerId);
             ResourceCalculatorProcessTree pTree = ptInfo.getProcessTree();
             pTree.updateProcessTree();    // update process-tree
             long currentVmemUsage = pTree.getVirtualMemorySize();
@@ -536,13 +534,11 @@ public class ContainersMonitorImpl extends AbstractService implements
                 + "while monitoring resource of {}", containerId, e);
           }
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Total Resource Usage stats in NM by all containers : "
-              + "Virtual Memory= " + vmemUsageByAllContainers
-              + ", Physical Memory= " + pmemByAllContainers
-              + ", Total CPU usage(% per core)= "
-              + cpuUsagePercentPerCoreByAllContainers);
-        }
+        LOG.debug("Total Resource Usage stats in NM by all containers : "
+            + "Virtual Memory= {}, Physical Memory= {}, "
+            + "Total CPU usage(% per core)= {}", vmemUsageByAllContainers,
+            pmemByAllContainers, cpuUsagePercentPerCoreByAllContainers);
+
 
         // Save the aggregated utilization of the containers
         setContainersUtilization(trackedContainersUtilization);
@@ -587,9 +583,7 @@ public class ContainersMonitorImpl extends AbstractService implements
         if (pId != null) {
           // pId will be null, either if the container is not spawned yet
           // or if the container's pid is removed from ContainerExecutor
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Tracking ProcessTree " + pId + " for the first time");
-          }
+          LOG.debug("Tracking ProcessTree {} for the first time", pId);
           ResourceCalculatorProcessTree pt =
               getResourceCalculatorProcessTree(pId);
           ptInfo.setPid(pId);

+ 5 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java

@@ -159,9 +159,7 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
       lastTimeFoundDevices = r;
       return r;
     } catch (IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Failed to get output from " + pathOfGpuBinary);
-      }
+      LOG.debug("Failed to get output from {}", pathOfGpuBinary);
       throw new YarnException(e);
     }
   }
@@ -169,10 +167,8 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
   @Override
   public DeviceRuntimeSpec onDevicesAllocated(Set<Device> allocatedDevices,
       YarnRuntimeType yarnRuntime) throws Exception {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Generating runtime spec for allocated devices: "
-          + allocatedDevices + ", " + yarnRuntime.getName());
-    }
+    LOG.debug("Generating runtime spec for allocated devices: {}, {}",
+        allocatedDevices, yarnRuntime.getName());
     if (yarnRuntime == YarnRuntimeType.RUNTIME_DOCKER) {
       String nvidiaRuntime = "nvidia";
       String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";
@@ -201,14 +197,10 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
     String output = null;
     // output "major:minor" in hex
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Get major numbers from /dev/" + devName);
-      }
+      LOG.debug("Get major numbers from /dev/{}", devName);
       output = shellExecutor.getMajorMinorInfo(devName);
       String[] strs = output.trim().split(":");
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("stat output:" + output);
-      }
+      LOG.debug("stat output:{}", output);
       output = Integer.toString(Integer.parseInt(strs[0], 16));
     } catch (IOException e) {
       String msg =

+ 5 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java

@@ -164,10 +164,10 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
       Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
           new String[]{"stat", "-c", "%t:%T", "/dev/" + devName});
       try {
-        LOG.debug("Get FPGA major-minor numbers from /dev/" + devName);
+        LOG.debug("Get FPGA major-minor numbers from /dev/{}", devName);
         shexec.execute();
         String[] strs = shexec.getOutput().trim().split(":");
-        LOG.debug("stat output:" + shexec.getOutput());
+        LOG.debug("stat output:{}", shexec.getOutput());
         output = Integer.parseInt(strs[0], 16) + ":" +
             Integer.parseInt(strs[1], 16);
       } catch (IOException e) {
@@ -192,7 +192,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
             "Failed to execute " + binary + " diagnose, exception message:" + e
                 .getMessage() +", output:" + output + ", continue ...";
         LOG.warn(msg);
-        LOG.debug(shexec.getOutput());
+        LOG.debug("{}", shexec.getOutput());
       }
       return shexec.getOutput();
     }
@@ -241,7 +241,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
 
       if (aocxPath.isPresent()) {
         ipFilePath = aocxPath.get().toUri().toString();
-        LOG.debug("Found: " + ipFilePath);
+        LOG.debug("Found: {}", ipFilePath);
       }
     } else {
       LOG.warn("Localized resource is null!");
@@ -278,7 +278,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
     try {
       shexec.execute();
       if (0 == shexec.getExitCode()) {
-        LOG.debug(shexec.getOutput());
+        LOG.debug("{}", shexec.getOutput());
         LOG.info("Intel aocl program " + ipPath + " to " +
             aclName + " successfully");
       } else {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java

@@ -129,9 +129,7 @@ public class GpuDiscoverer {
     } catch (IOException e) {
       numOfErrorExecutionSinceLastSucceed++;
       String msg = getErrorMessageOfScriptExecution(e.getMessage());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(msg);
-      }
+      LOG.debug(msg);
       throw new YarnException(msg, e);
     } catch (YarnException e) {
       numOfErrorExecutionSinceLastSucceed++;

+ 3 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java

@@ -118,11 +118,9 @@ public class AllocationBasedResourceUtilizationTracker implements
       return false;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("before cpuCheck [asked={} > allowed={}]",
-          this.containersAllocation.getCPU(),
-          getContainersMonitor().getVCoresAllocatedForContainers());
-    }
+    LOG.debug("before cpuCheck [asked={} > allowed={}]",
+        this.containersAllocation.getCPU(),
+        getContainersMonitor().getVCoresAllocatedForContainers());
     // Check CPU.
     if (this.containersAllocation.getCPU() + cpuVcores >
         getContainersMonitor().getVCoresAllocatedForContainers()) {

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java

@@ -137,10 +137,8 @@ public class ContainerScheduler extends AbstractService implements
         resourceHandlerChain = ResourceHandlerModule
             .getConfiguredResourceHandlerChain(conf, context);
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain
-            != null));
-      }
+      LOG.debug("Resource handler chain enabled = {}",
+          (resourceHandlerChain != null));
       if (resourceHandlerChain != null) {
         LOG.debug("Bootstrapping resource handler chain");
         resourceHandlerChain.bootstrap(conf);

+ 20 - 57
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java

@@ -447,10 +447,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   public void storeContainer(ContainerId containerId, int containerVersion,
       long startTime, StartContainerRequest startRequest) throws IOException {
     String idStr = containerId.toString();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainer: containerId= " + idStr
-          + ", startRequest= " + startRequest);
-    }
+    LOG.debug("storeContainer: containerId= {}, startRequest= {}",
+        idStr, startRequest);
     final String keyVersion = getContainerVersionKey(idStr);
     final String keyRequest =
         getContainerKey(idStr, CONTAINER_REQUEST_KEY_SUFFIX);
@@ -488,9 +486,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerQueued(ContainerId containerId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerQueued: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerQueued: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_QUEUED_KEY_SUFFIX;
@@ -504,9 +500,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private void removeContainerQueued(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainerQueued: containerId=" + containerId);
-    }
+    LOG.debug("removeContainerQueued: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_QUEUED_KEY_SUFFIX;
@@ -520,9 +514,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerPaused(ContainerId containerId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerPaused: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerPaused: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_PAUSED_KEY_SUFFIX;
@@ -537,9 +529,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeContainerPaused(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainerPaused: containerId=" + containerId);
-    }
+    LOG.debug("removeContainerPaused: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_PAUSED_KEY_SUFFIX;
@@ -554,10 +544,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerDiagnostics(ContainerId containerId,
       StringBuilder diagnostics) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerDiagnostics: containerId=" + containerId
-          + ", diagnostics=" + diagnostics);
-    }
+    LOG.debug("storeContainerDiagnostics: containerId={}, diagnostics={}",
+        containerId, diagnostics);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_DIAGS_KEY_SUFFIX;
@@ -572,9 +560,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerLaunched(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerLaunched: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerLaunched: containerId={}", containerId);
 
     // Removing the container if queued for backward compatibility reasons
     removeContainerQueued(containerId);
@@ -591,9 +577,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerUpdateToken(ContainerId containerId,
       ContainerTokenIdentifier containerTokenIdentifier) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerUpdateToken: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerUpdateToken: containerId={}", containerId);
 
     String keyUpdateToken = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_UPDATE_TOKEN_SUFFIX;
@@ -621,9 +605,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerKilled(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerKilled: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerKilled: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_KILLED_KEY_SUFFIX;
@@ -638,9 +620,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerCompleted(ContainerId containerId,
       int exitCode) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerCompleted: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerCompleted: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_EXIT_CODE_KEY_SUFFIX;
@@ -706,9 +686,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeContainer(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainer: containerId=" + containerId);
-    }
+    LOG.debug("removeContainer: containerId={}", containerId);
 
     String keyPrefix = CONTAINERS_KEY_PREFIX + containerId.toString();
     try {
@@ -789,10 +767,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeApplication(ApplicationId appId,
       ContainerManagerApplicationProto p) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeApplication: appId=" + appId
-          + ", proto=" + p);
-    }
+    LOG.debug("storeApplication: appId={}, proto={}", appId, p);
 
     String key = APPLICATIONS_KEY_PREFIX + appId;
     try {
@@ -806,9 +781,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeApplication(ApplicationId appId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeApplication: appId=" + appId);
-    }
+    LOG.debug("removeApplication: appId={}", appId);
 
     try {
       WriteBatch batch = db.createWriteBatch();
@@ -917,9 +890,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         return null;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading completed resource from " + key);
-      }
+      LOG.debug("Loading completed resource from {}", key);
       nextCompletedResource = LocalizedResourceProto.parseFrom(
           entry.getValue());
     }
@@ -952,9 +923,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       }
 
       Path localPath = new Path(key.substring(keyPrefix.length()));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading in-progress resource at " + localPath);
-      }
+      LOG.debug("Loading in-progress resource at {}", localPath);
       nextStartedResource = new SimpleEntry<LocalResourceProto, Path>(
           LocalResourceProto.parseFrom(entry.getValue()), localPath);
     }
@@ -1042,9 +1011,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     String localPath = proto.getLocalPath();
     String startedKey = getResourceStartedKey(user, appId, localPath);
     String completedKey = getResourceCompletedKey(user, appId, localPath);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing localized resource to " + completedKey);
-    }
+    LOG.debug("Storing localized resource to {}", completedKey);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
@@ -1066,9 +1033,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     String localPathStr = localPath.toString();
     String startedKey = getResourceStartedKey(user, appId, localPathStr);
     String completedKey = getResourceCompletedKey(user, appId, localPathStr);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing local resource at " + localPathStr);
-    }
+    LOG.debug("Removing local resource at {}", localPathStr);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
@@ -1505,9 +1470,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             break;
           }
           batch.delete(key);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("cleanup " + keyStr + " from leveldb");
-          }
+          LOG.debug("cleanup {} from leveldb", keyStr);
         }
         db.write(batch);
       } catch (DBException e) {

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java

@@ -237,10 +237,8 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
     request.setAllocatedContainers(allocatedContainers);
     request.getAllocateRequest().setAskList(partitionedAsks.getGuaranteed());
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocate request to the" +
+    LOG.debug("Forwarding allocate request to the " +
           "Distributed Scheduler Service on YARN RM");
-    }
 
     DistributedSchedulingAllocateResponse dsResp =
         getNextInterceptor().allocateForDistributedScheduling(request);

+ 5 - 11
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java

@@ -196,10 +196,8 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
   public synchronized void appFinished(ApplicationId appId) {
     List<ApplicationAttemptId> appAttemptList = appToAppAttemptMap.get(appId);
     if (appAttemptList != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing application attempts NMToken keys for application "
-            + appId);
-      }
+      LOG.debug("Removing application attempts NMToken keys for"
+          + " application {}", appId);
       for (ApplicationAttemptId appAttemptId : appAttemptList) {
         removeAppAttemptKey(appAttemptId);
       }
@@ -233,10 +231,8 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
     if (oldKey == null
         || oldKey.getMasterKey().getKeyId() != identifier.getKeyId()) {
       // Update key only if it is modified.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("NMToken key updated for application attempt : "
-            + identifier.getApplicationAttemptId().toString());
-      }
+      LOG.debug("NMToken key updated for application attempt : {}",
+          identifier.getApplicationAttemptId().toString());
       if (identifier.getKeyId() == currentMasterKey.getMasterKey()
         .getKeyId()) {
         updateAppAttemptKey(appAttemptId, currentMasterKey);
@@ -252,9 +248,7 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
   }
   
   public synchronized void setNodeId(NodeId nodeId) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("updating nodeId : " + nodeId);
-    }
+    LOG.debug("updating nodeId : {}", nodeId);
     this.nodeId = nodeId;
   }
   

+ 18 - 38
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java

@@ -205,18 +205,14 @@ public class NMTimelinePublisher extends CompositeService {
           LOG.error(
               "Failed to publish Container metrics for container " +
                   container.getContainerId());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to publish Container metrics for container " +
-                container.getContainerId(), e);
-          }
+          LOG.debug("Failed to publish Container metrics for container {}",
+              container.getContainerId(), e);
         } catch (YarnException e) {
           LOG.error(
               "Failed to publish Container metrics for container " +
                   container.getContainerId(), e.getMessage());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to publish Container metrics for container " +
-                container.getContainerId(), e);
-          }
+          LOG.debug("Failed to publish Container metrics for container {}",
+              container.getContainerId(), e);
         }
       }
     }
@@ -317,17 +313,13 @@ public class NMTimelinePublisher extends CompositeService {
       } catch (IOException e) {
         LOG.error("Failed to publish Container metrics for container "
             + container.getContainerId());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to publish Container metrics for container "
-              + container.getContainerId(), e);
-        }
+        LOG.debug("Failed to publish Container metrics for container {}",
+            container.getContainerId(), e);
       } catch (YarnException e) {
         LOG.error("Failed to publish Container metrics for container "
             + container.getContainerId(), e.getMessage());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to publish Container metrics for container "
-              + container.getContainerId(), e);
-        }
+        LOG.debug("Failed to publish Container metrics for container {}",
+            container.getContainerId(), e);
       }
     }
   }
@@ -347,8 +339,8 @@ public class NMTimelinePublisher extends CompositeService {
   private void putEntity(TimelineEntity entity, ApplicationId appId) {
     try {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Publishing the entity " + entity + ", JSON-style content: "
-            + TimelineUtils.dumpTimelineRecordtoJSON(entity));
+        LOG.debug("Publishing the entity {} JSON-style content: {}",
+            entity, TimelineUtils.dumpTimelineRecordtoJSON(entity));
       }
       TimelineV2Client timelineClient = getTimelineClient(appId);
       if (timelineClient != null) {
@@ -359,14 +351,10 @@ public class NMTimelinePublisher extends CompositeService {
       }
     } catch (IOException e) {
       LOG.error("Error when publishing entity " + entity);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error when publishing entity " + entity, e);
-      }
+      LOG.debug("Error when publishing entity {}", entity, e);
     } catch (YarnException e) {
       LOG.error("Error when publishing entity " + entity, e.getMessage());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error when publishing entity " + entity, e);
-      }
+      LOG.debug("Error when publishing entity {}", entity, e);
     }
   }
 
@@ -388,10 +376,8 @@ public class NMTimelinePublisher extends CompositeService {
       break;
 
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType() + " is not a desired ApplicationEvent which"
-            + " needs to be published by NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired ApplicationEvent which"
+          + " needs to be published by NMTimelinePublisher", event.getType());
       break;
     }
   }
@@ -404,11 +390,8 @@ public class NMTimelinePublisher extends CompositeService {
       break;
 
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType()
-            + " is not a desired ContainerEvent which needs to be published by"
-            + " NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired ContainerEvent which needs to be"
+          + " published by NMTimelinePublisher", event.getType());
       break;
     }
   }
@@ -425,11 +408,8 @@ public class NMTimelinePublisher extends CompositeService {
           ContainerMetricsConstants.LOCALIZATION_START_EVENT_TYPE);
       break;
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType()
-            + " is not a desired LocalizationEvent which needs to be published"
-            + " by NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired LocalizationEvent which needs to be"
+            + " published by NMTimelinePublisher", event.getType());
       break;
     }
   }

+ 4 - 10
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java

@@ -206,9 +206,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
         throws IOException {
     String path = pathForCgroup(controller, groupName);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("createCgroup: " + path);
-    }
+    LOG.debug("createCgroup: {}", path);
 
     if (!new File(path).mkdir()) {
       throw new IOException("Failed to create cgroup at " + path);
@@ -220,9 +218,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
     String path = pathForCgroup(controller, groupName);
     param = controller + "." + param;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("updateCgroup: " + path + ": " + param + "=" + value);
-    }
+    LOG.debug("updateCgroup: {}: {}={}", path, param, value);
 
     PrintWriter pw = null;
     try {
@@ -259,7 +255,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
               + "/tasks"), "UTF-8"))) {
         str = inl.readLine();
         if (str != null) {
-          LOG.debug("First line in cgroup tasks file: " + cgf + " " + str);
+          LOG.debug("First line in cgroup tasks file: {} {}", cgf, str);
         }
       } catch (IOException e) {
         LOG.warn("Failed to read cgroup tasks file. ", e);
@@ -302,9 +298,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
   boolean deleteCgroup(String cgroupPath) {
     boolean deleted = false;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("deleteCgroup: " + cgroupPath);
-    }
+    LOG.debug("deleteCgroup: {}", cgroupPath);
     long start = clock.getTime();
     do {
       try {

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java

@@ -353,7 +353,7 @@ public class NodeManagerHardwareUtils {
     for (Map.Entry<String, ResourceInformation> entry : resourceInformation
         .entrySet()) {
       ret.setResourceInformation(entry.getKey(), entry.getValue());
-      LOG.debug("Setting key " + entry.getKey() + " to " + entry.getValue());
+      LOG.debug("Setting key {} to {}", entry.getKey(), entry.getValue());
     }
     if (resourceInformation.containsKey(memory)) {
       Long value = resourceInformation.get(memory).getValue();
@@ -364,7 +364,7 @@ public class NodeManagerHardwareUtils {
       ResourceInformation memResInfo = resourceInformation.get(memory);
       if(memResInfo.getValue() == 0) {
         ret.setMemorySize(getContainerMemoryMB(conf));
-        LOG.debug("Set memory to " + ret.getMemorySize());
+        LOG.debug("Set memory to {}", ret.getMemorySize());
       }
     }
     if (resourceInformation.containsKey(vcores)) {
@@ -376,10 +376,10 @@ public class NodeManagerHardwareUtils {
       ResourceInformation vcoresResInfo = resourceInformation.get(vcores);
       if(vcoresResInfo.getValue() == 0) {
         ret.setVirtualCores(getVCores(conf));
-        LOG.debug("Set vcores to " + ret.getVirtualCores());
+        LOG.debug("Set vcores to {}", ret.getVirtualCores());
       }
     }
-    LOG.debug("Node resource information map is " + ret);
+    LOG.debug("Node resource information map is {}", ret);
     return ret;
   }
 }

+ 3 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java

@@ -49,9 +49,7 @@ public class ProcessIdFileReader {
     if (path == null) {
       throw new IOException("Trying to access process id from a null path");
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Accessing pid from pid file " + path);
-    }
+    LOG.debug("Accessing pid from pid file {}", path);
     String processId = null;
     BufferedReader bufReader = null;
 
@@ -99,10 +97,8 @@ public class ProcessIdFileReader {
         bufReader.close();
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got pid " + (processId != null ? processId : "null")
-          + " from path " + path);
-    }
+    LOG.debug("Got pid {} from path {}", processId, path);
     return processId;
   }
 

+ 1 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java

@@ -151,9 +151,7 @@ public class ContainerLogsPage extends NMView {
                 printAggregatedLogFileDirectory(html, containersLogMeta);
               }
             } catch (Exception ex) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(ex.getMessage());
-              }
+              LOG.debug("{}", ex.getMessage());
             }
           }
         } else {

+ 3 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java

@@ -328,9 +328,7 @@ public class NMWebServices {
       } catch (IOException ex) {
         // Something wrong with we tries to access the remote fs for the logs.
         // Skip it and do nothing
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(ex.getMessage());
-        }
+        LOG.debug("{}", ex.getMessage());
       }
       GenericEntity<List<ContainerLogsInfo>> meta = new GenericEntity<List<
           ContainerLogsInfo>>(containersLogsInfo){};
@@ -433,10 +431,8 @@ public class NMWebServices {
     } catch (Exception ex) {
       // This NM does not have this container any more. We
       // assume the container has already finished.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Can not find the container:" + containerId
-            + " in this node.");
-      }
+      LOG.debug("Can not find the container:{} in this node.",
+          containerId);
     }
     final boolean isRunning = tempIsRunning;
     File logFile = null;

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java

@@ -213,10 +213,8 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
 
   @Override
   public void fenceOldActive(byte[] oldActiveData) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Request to fence old active being ignored, " +
-          "as embedded leader election doesn't support fencing");
-    }
+    LOG.debug("Request to fence old active being ignored, " +
+        "as embedded leader election doesn't support fencing");
   }
 
   private static byte[] createActiveNodeInfo(String clusterId, String rmId)

+ 2 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java

@@ -1470,10 +1470,8 @@ public class ClientRMService extends AbstractService implements
       ReservationDefinition contract, String reservationId) {
     if ((contract.getArrival() - clock.getTime()) < reservationSystem
         .getPlanFollowerTimeStep()) {
-      LOG.debug(MessageFormat
-          .format(
-              "Reservation {0} is within threshold so attempting to create synchronously.",
-              reservationId));
+      LOG.debug("Reservation {} is within threshold so attempting to"
+          + " create synchronously.", reservationId);
       reservationSystem.synchronizePlan(planName, true);
       LOG.info(MessageFormat.format("Created reservation {0} synchronously.",
           reservationId));

+ 5 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java

@@ -292,7 +292,7 @@ public class DecommissioningNodesWatcher {
         }
         // Remove stale non-DECOMMISSIONING node
         if (d.nodeState != NodeState.DECOMMISSIONING) {
-          LOG.debug("remove " + d.nodeState + " " + d.nodeId);
+          LOG.debug("remove {} {}", d.nodeState, d.nodeId);
           it.remove();
           continue;
         } else if (now - d.lastUpdateTime > 60000L) {
@@ -300,7 +300,7 @@ public class DecommissioningNodesWatcher {
           RMNode rmNode = getRmNode(d.nodeId);
           if (rmNode != null &&
               rmNode.getState() == NodeState.DECOMMISSIONED) {
-            LOG.debug("remove " + rmNode.getState() + " " + d.nodeId);
+            LOG.debug("remove {} {}", rmNode.getState(), d.nodeId);
             it.remove();
             continue;
           }
@@ -308,7 +308,7 @@ public class DecommissioningNodesWatcher {
         if (d.timeoutMs >= 0 &&
             d.decommissioningStartTime + d.timeoutMs < now) {
           staleNodes.add(d.nodeId);
-          LOG.debug("Identified stale and timeout node " + d.nodeId);
+          LOG.debug("Identified stale and timeout node {}", d.nodeId);
         }
       }
 
@@ -342,14 +342,14 @@ public class DecommissioningNodesWatcher {
       ApplicationId appId = it.next();
       RMApp rmApp = rmContext.getRMApps().get(appId);
       if (rmApp == null) {
-        LOG.debug("Consider non-existing app " + appId + " as completed");
+        LOG.debug("Consider non-existing app {} as completed", appId);
         it.remove();
         continue;
       }
       if (rmApp.getState() == RMAppState.FINISHED ||
           rmApp.getState() == RMAppState.FAILED ||
           rmApp.getState() == RMAppState.KILLED) {
-        LOG.debug("Remove " + rmApp.getState() + " app " + appId);
+        LOG.debug("Remove {} app {}", rmApp.getState(), appId);
         it.remove();
       }
     }

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java

@@ -493,17 +493,17 @@ public class NodesListManager extends CompositeService implements
     RMNode eventNode = event.getNode();
     switch (event.getType()) {
     case NODE_UNUSABLE:
-      LOG.debug(eventNode + " reported unusable");
+      LOG.debug("{} reported unusable", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(eventNode,
           RMAppNodeUpdateType.NODE_UNUSABLE);
       break;
     case NODE_USABLE:
-      LOG.debug(eventNode + " reported usable");
+      LOG.debug("{} reported usable", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(eventNode,
           RMAppNodeUpdateType.NODE_USABLE);
       break;
     case NODE_DECOMMISSIONING:
-      LOG.debug(eventNode + " reported decommissioning");
+      LOG.debug("{} reported decommissioning", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(
           eventNode, RMAppNodeUpdateType.NODE_DECOMMISSIONING);
       break;

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java

@@ -618,8 +618,8 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
   @Override
   public void handle(RMAppManagerEvent event) {
     ApplicationId applicationId = event.getApplicationId();
-    LOG.debug("RMAppManager processing event for " 
-        + applicationId + " of type " + event.getType());
+    LOG.debug("RMAppManager processing event for {} of type {}",
+        applicationId, event.getType());
     switch (event.getType()) {
     case APP_COMPLETED :
       finishApplication(applicationId);

Some files were not shown because too many files changed in this diff