
HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions.
Contributed by Andras Bokor.

Steve Loughran, 7 years ago
Parent
Current commit
f20dc0d577
20 files changed, with 576 additions and 749 deletions
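
The change applied across these files follows a single pattern: instead of concatenating an exception (or its message) into the log string, the exception is passed as the final argument so the full stack trace is recorded, and string concatenation gives way to SLF4J-style {} placeholders. A minimal before/after sketch of the idea, using a hypothetical class name and file path that are not part of this commit:

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogOverloadSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogOverloadSketch.class);

  void moveBadFile(String path) {
    try {
      throw new IOException("simulated failure"); // stand-in for real I/O
    } catch (IOException e) {
      // Before: only e.toString() reaches the log; the stack trace is lost.
      LOG.warn("Error moving bad file " + path + ": " + e);
      // After: the path fills the placeholder and the trailing throwable is
      // logged together with its full stack trace.
      LOG.warn("Error moving bad file {}", path, e);
    }
  }
}
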
  1. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
  2. +12 -18  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  3. +10 -10  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  4. +4 -5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  5. +4 -7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
  6. +19 -20  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
  7. +8 -19  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
  8. +2 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
  9. +1 -2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
  10. +10 -15  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
  11. +127 -187  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
  12. +154 -187  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  13. +92 -119  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  14. +79 -93  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  15. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  16. +17 -17  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
  17. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  18. +13 -13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
  19. +10 -12  hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
  20. +11 -18  hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -139,7 +139,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
           LOG.warn("Ignoring failure of renameTo");
         }
     } catch (IOException e) {
-      LOG.warn("Error moving bad file " + p + ": " + e);
+      LOG.warn("Error moving bad file " + p, e);
     }
     return false;
   }

+ 12 - 18
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -888,9 +888,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       Stat oldBreadcrumbStat = fenceOldActive();
       writeBreadCrumbNode(oldBreadcrumbStat);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming active for " + this);
-      }
+      LOG.debug("Becoming active for {}", this);
+
       appClient.becomeActive();
       state = State.ACTIVE;
       return true;
@@ -910,8 +909,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throws KeeperException, InterruptedException {
     Preconditions.checkState(appData != null, "no appdata");
     
-    LOG.info("Writing znode " + zkBreadCrumbPath +
-        " to indicate that the local node is the most recent active...");
+    LOG.info("Writing znode {} to indicate that the local " +
+        "node is the most recent active...", zkBreadCrumbPath);
     if (oldBreadcrumbStat == null) {
       // No previous active, just create the node
       createWithRetries(zkBreadCrumbPath, appData, zkAcl,
@@ -948,9 +947,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       
       deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
     } catch (Exception e) {
-      LOG.warn("Unable to delete our own bread-crumb of being active at " +
-          zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
-          "Expecting to be fenced by the next active.");
+      LOG.warn("Unable to delete our own bread-crumb of being active at {}." +
+          ". Expecting to be fenced by the next active.", zkBreadCrumbPath, e);
     }
   }
 
@@ -984,7 +982,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throw ke;
     }
 
-    LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
+    LOG.info("Old node exists: {}", StringUtils.byteToHexString(data));
     if (Arrays.equals(data, appData)) {
       LOG.info("But old node has our own data, so don't need to fence it.");
     } else {
@@ -995,9 +993,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming standby for " + this);
-      }
+      LOG.debug("Becoming standby for {}", this);
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -1005,9 +1001,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Entering neutral mode for " + this);
-      }
+      LOG.debug("Entering neutral mode for {}", this);
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }
@@ -1124,7 +1118,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private synchronized boolean isStaleClient(Object ctx) {
     Preconditions.checkNotNull(ctx);
     if (zkClient != (ZooKeeper)ctx) {
-      LOG.warn("Ignoring stale result from old client with sessionId " +
+      LOG.warn("Ignoring stale result from old client with sessionId {}",
           String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
       return true;
     }
@@ -1162,8 +1156,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         throws KeeperException, IOException {
       try {
         if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
-          LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
-              + connectionTimeoutMs + " milliseconds");
+          LOG.error("Connection timed out: couldn't connect to ZooKeeper in " +
+              "{} milliseconds", connectionTimeoutMs);
           zk.close();
           throw KeeperException.create(Code.CONNECTIONLOSS);
         }

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -123,7 +123,7 @@ public class FailoverController {
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
-      LOG.error(msg + ": " + e.getLocalizedMessage());
+      LOG.error(msg, e);
       throw new FailoverFailedException(msg, e);
     }
 
@@ -139,7 +139,7 @@ public class FailoverController {
             target + " is not ready to become active: " +
             notReadyReason);
       } else {
-        LOG.warn("Service is not ready to become active, but forcing: " +
+        LOG.warn("Service is not ready to become active, but forcing: {}",
             notReadyReason);
       }
     }
@@ -172,11 +172,11 @@ public class FailoverController {
       proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
-      LOG.warn("Unable to gracefully make " + svc + " standby (" +
-          sfe.getMessage() + ")");
+      LOG.warn("Unable to gracefully make {} standby ({})",
+          svc, sfe.getMessage());
     } catch (IOException ioe) {
-      LOG.warn("Unable to gracefully make " + svc +
-          " standby (unable to connect)", ioe);
+      LOG.warn("Unable to gracefully make {} standby (unable to connect)",
+          svc, ioe);
     } finally {
       if (proxy != null) {
         RPC.stopProxy(proxy);
@@ -227,13 +227,13 @@ public class FailoverController {
           toSvc.getProxy(conf, rpcTimeoutToNewActive),
           createReqInfo());
     } catch (ServiceFailedException sfe) {
-      LOG.error("Unable to make " + toSvc + " active (" +
-          sfe.getMessage() + "). Failing back.");
+      LOG.error("Unable to make {} active ({}). Failing back.",
+          toSvc, sfe.getMessage());
       failed = true;
       cause = sfe;
     } catch (IOException ioe) {
-      LOG.error("Unable to make " + toSvc +
-          " active (unable to connect). Failing back.", ioe);
+      LOG.error("Unable to make {} active (unable to connect). Failing back.",
+          toSvc, ioe);
       failed = true;
       cause = ioe;
     }

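The FailoverController hunks above also show the distinction the commit draws between logging only an exception's reason and logging the whole exception: sfe.getMessage() is bound to a {} placeholder when just the reason is wanted, while ioe is appended after the placeholder arguments, and SLF4J treats a trailing argument without a matching {} as the Throwable to print with its stack trace. A short self-contained sketch of the two forms, with hypothetical class and variable names:

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class StandbyLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(StandbyLoggingSketch.class);

  void logTransitionFailure(Object svc, Exception sfe, IOException ioe) {
    // Reason only: the message fills the second placeholder; no stack trace.
    LOG.warn("Unable to gracefully make {} standby ({})", svc, sfe.getMessage());

    // Full details: ioe has no matching placeholder, so it is logged as the
    // Throwable for this event, stack trace included.
    LOG.warn("Unable to gracefully make {} standby (unable to connect)",
        svc, ioe);
  }
}
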
+ 4 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -204,12 +204,11 @@ public class HealthMonitor {
         healthy = true;
       } catch (Throwable t) {
         if (isHealthCheckFailedException(t)) {
-          LOG.warn("Service health check failed for " + targetToMonitor
-              + ": " + t.getMessage());
+          LOG.warn("Service health check failed for {}", targetToMonitor, t);
           enterState(State.SERVICE_UNHEALTHY);
         } else {
-          LOG.warn("Transport-level exception trying to monitor health of " +
-              targetToMonitor + ": " + t.getCause() + " " + t.getLocalizedMessage());
+          LOG.warn("Transport-level exception trying to monitor health of {}",
+              targetToMonitor, t);
           RPC.stopProxy(proxy);
           proxy = null;
           enterState(State.SERVICE_NOT_RESPONDING);
@@ -246,7 +245,7 @@ public class HealthMonitor {
 
   private synchronized void enterState(State newState) {
     if (newState != state) {
-      LOG.info("Entering state " + newState);
+      LOG.info("Entering state {}", newState);
       state = newState;
       synchronized (callbacks) {
         for (Callback cb : callbacks) {

+ 4 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java

@@ -72,9 +72,7 @@ public class RetryUtils {
             retryPolicySpecKey, defaultRetryPolicySpec
             );
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
-    }
+    LOG.debug("multipleLinearRandomRetry = {}", multipleLinearRandomRetry);
 
     if (multipleLinearRandomRetry == null) {
       //no retry
@@ -124,10 +122,9 @@ public class RetryUtils {
         p = RetryPolicies.TRY_ONCE_THEN_FAIL;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("RETRY " + retries + ") policy="
-            + p.getClass().getSimpleName() + ", exception=" + e);
-      }
+      LOG.debug("RETRY {}) policy={}", retries,
+            p.getClass().getSimpleName(), e);
+
       return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
     }
 

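A side effect of the placeholder style, visible in this and several later hunks, is that if (LOG.isDebugEnabled()) guards around cheap messages can simply be dropped: the message is only formatted once the logger knows the level is enabled. The guards are kept where an argument is itself expensive to compute, which is why some hunks further down (for example those calling handle.dumpFileHandle()) retain them. A rough illustration, with a hypothetical expensiveSummary() standing in for such a computation:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DebugGuardSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(DebugGuardSketch.class);

  void log(Object retryPolicy) {
    // Cheap argument: no guard needed, formatting is deferred until the
    // logger has confirmed that debug is enabled.
    LOG.debug("multipleLinearRandomRetry = {}", retryPolicy);

    // Expensive argument: expensiveSummary() would still run even with debug
    // disabled, so the explicit guard stays worthwhile in cases like this.
    if (LOG.isDebugEnabled()) {
      LOG.debug("state summary: {}", expensiveSummary());
    }
  }

  // Hypothetical stand-in for a costly call such as handle.dumpFileHandle().
  private String expensiveSummary() {
    return "...";
  }
}
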
+ 19 - 20
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java

@@ -179,8 +179,7 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}", strInterface, e);
       return new String[] { cachedHostAddress };
     }
     if (netIf == null) {
@@ -265,7 +264,7 @@ public class DNS {
     }
 
     if (hosts.isEmpty()) {
-      LOG.warn("Unable to determine hostname for interface " +
+      LOG.warn("Unable to determine hostname for interface {}",
           strInterface);
       hosts.add(cachedHostname);
     }
@@ -283,8 +282,8 @@ public class DNS {
     try {
       localhost = InetAddress.getLocalHost().getCanonicalHostName();
     } catch (UnknownHostException e) {
-      LOG.warn("Unable to determine local hostname "
-          + "-falling back to \"" + LOCALHOST + "\"", e);
+      LOG.warn("Unable to determine local hostname -falling back to '{}'",
+          LOCALHOST, e);
       localhost = LOCALHOST;
     }
     return localhost;
@@ -303,21 +302,21 @@ public class DNS {
    */
   private static String resolveLocalHostIPAddress() {
     String address;
+    try {
+      address = InetAddress.getLocalHost().getHostAddress();
+    } catch (UnknownHostException e) {
+      LOG.warn("Unable to determine address of the host " +
+          "-falling back to '{}' address", LOCALHOST, e);
       try {
-        address = InetAddress.getLocalHost().getHostAddress();
-      } catch (UnknownHostException e) {
-        LOG.warn("Unable to determine address of the host"
-                + "-falling back to \"" + LOCALHOST + "\" address", e);
-        try {
-          address = InetAddress.getByName(LOCALHOST).getHostAddress();
-        } catch (UnknownHostException noLocalHostAddressException) {
-          //at this point, deep trouble
-          LOG.error("Unable to determine local loopback address "
-                  + "of \"" + LOCALHOST + "\" " +
-                  "-this system's network configuration is unsupported", e);
-          address = null;
-        }
+        address = InetAddress.getByName(LOCALHOST).getHostAddress();
+      } catch (UnknownHostException noLocalHostAddressException) {
+        //at this point, deep trouble
+        LOG.error("Unable to determine local loopback address of '{}' " +
+            "-this system's network configuration is unsupported",
+            LOCALHOST, e);
+        address = null;
       }
+    }
     return address;
   }
 
@@ -431,8 +430,8 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}: {}",
+          strInterface, e.getMessage());
       return Arrays.asList(InetAddress.getByName(cachedHostAddress));
     }
     if (netIf == null) {

+ 8 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java

@@ -194,9 +194,7 @@ public abstract class AbstractService implements Service {
           serviceStart();
           if (isInState(STATE.STARTED)) {
             //if the service started (and isn't now in a later state), notify
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Service " + getName() + " is started");
-            }
+            LOG.debug("Service {} is started", getName());
             notifyListeners();
           }
         } catch (Exception e) {
@@ -235,9 +233,7 @@ public abstract class AbstractService implements Service {
         }
       } else {
         //already stopped: note it
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Ignoring re-entrant call to stop()");
-        }
+        LOG.debug("Ignoring re-entrant call to stop()");
       }
     }
   }
@@ -258,9 +254,7 @@ public abstract class AbstractService implements Service {
    * @param exception the exception
    */
   protected final void noteFailure(Exception exception) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("noteFailure " + exception, (Throwable) null);
-    }
+    LOG.debug("noteFailure {}" + exception);
     if (exception == null) {
       //make sure failure logic doesn't itself cause problems
       return;
@@ -270,10 +264,8 @@ public abstract class AbstractService implements Service {
       if (failureCause == null) {
         failureCause = exception;
         failureState = getServiceState();
-        LOG.info("Service " + getName()
-                 + " failed in state " + failureState
-                 + "; cause: " + exception,
-                 exception);
+        LOG.info("Service {} failed in state {}",
+            getName(), failureState, exception);
       }
     }
   }
@@ -418,8 +410,7 @@ public abstract class AbstractService implements Service {
       listeners.notifyListeners(this);
       globalListeners.notifyListeners(this);
     } catch (Throwable e) {
-      LOG.warn("Exception while notifying listeners of " + this + ": " + e,
-               e);
+      LOG.warn("Exception while notifying listeners of {}", this, e);
     }
   }
 
@@ -449,10 +440,8 @@ public abstract class AbstractService implements Service {
     assert stateModel != null : "null state in " + name + " " + this.getClass();
     STATE oldState = stateModel.enterState(newState);
     if (oldState != newState) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-          "Service: " + getName() + " entered state " + getServiceState());
-      }
+      LOG.debug("Service: {} entered state {}", getName(), getServiceState());
+
       recordLifecycleEvent();
     }
     return oldState;

+ 2 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java

@@ -81,9 +81,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service " + service.getName()
-               + " : " + e,
-               e);
+      log.warn("When stopping the service " + service.getName(), e);
       return e;
     }
     return null;
@@ -103,7 +101,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service {} : {}", service.getName(), e, e);
+      log.warn("When stopping the service {}", service.getName(), e);
       return e;
     }
     return null;

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java

@@ -57,8 +57,7 @@ public class TestServiceOperations {
     ServiceOperations.stopQuietly(logger, service);
 
     assertThat(logCapturer.getOutput(),
-        containsString("When stopping the service " + service.getName()
-            + " : " + e));
+        containsString("When stopping the service " + service.getName()));
     verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
   }
 

+ 10 - 15
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import org.apache.commons.logging.LogFactory;
-
 import java.io.IOException;
 import java.net.URI;
 import java.nio.file.FileSystemException;
@@ -32,7 +30,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -50,12 +47,15 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A cache saves DFSClient objects for different users.
  */
 class DFSClientCache {
-  private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DFSClientCache.class);
   /**
    * Cache that maps User id to the corresponding DFSClient.
    */
@@ -169,8 +169,8 @@ class DFSClientCache {
       URI value = namenodeUriMap.get(namenodeId);
       // if a unique nnid, add it to the map
       if (value == null) {
-        LOG.info("Added export:" + exportPath + " FileSystem URI:" + exportURI
-              + " with namenodeId:" + namenodeId);
+        LOG.info("Added export: {} FileSystem URI: {} with namenodeId: {}",
+            exportPath, exportPath, namenodeId);
         namenodeUriMap.put(namenodeId, exportURI);
       } else {
         // if the nnid already exists, it better be the for the same namenode
@@ -194,7 +194,7 @@ class DFSClientCache {
       try {
         closeAll(true);
       } catch (IOException e) {
-        LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
+        LOG.info("DFSClientCache.closeAll() threw an exception", e);
       }
     }
   }
@@ -269,10 +269,7 @@ class DFSClientCache {
 
     UserGroupInformation ugi =
             UserGroupInformation.createProxyUser(effectiveUser, realUser);
-    if (LOG.isDebugEnabled()){
-      LOG.debug(String.format("Created ugi:" +
-              " %s for username: %s", ugi, effectiveUser));
-    }
+    LOG.debug("Created ugi: {} for username: {}", ugi, effectiveUser);
     return ugi;
   }
 
@@ -329,8 +326,7 @@ class DFSClientCache {
     try {
       client = clientCache.get(new DfsClientKey(userName, namenodeId));
     } catch (ExecutionException e) {
-      LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
-          + e);
+      LOG.error("Failed to create DFSClient for user: {}", userName, e);
     }
     return client;
   }
@@ -343,8 +339,7 @@ class DFSClientCache {
     try {
       s = inputstreamCache.get(k);
     } catch (ExecutionException e) {
-      LOG.warn("Failed to create DFSInputStream for user:" + userName
-          + " Cause:" + e);
+      LOG.warn("Failed to create DFSInputStream for user: {}", userName, e);
     }
     return s;
   }

+ 127 - 187
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -31,8 +31,6 @@ import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -61,13 +59,15 @@ import org.jboss.netty.channel.Channel;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenFileCtx saves the context of one HDFS file output stream. Access to it is
  * synchronized by its member lock.
  */
 class OpenFileCtx {
-  public static final Log LOG = LogFactory.getLog(OpenFileCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(OpenFileCtx.class);
   
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
@@ -210,10 +210,8 @@ class OpenFileCtx {
   /** Increase or decrease the memory occupation of non-sequential writes */
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
-          + newValue);
-    }
+    LOG.debug("Update nonSequentialWriteInMemory by {} new value: {}",
+        count, newValue);
 
     Preconditions.checkState(newValue >= 0,
         "nonSequentialWriteInMemory is negative " + newValue
@@ -273,9 +271,7 @@ class OpenFileCtx {
   // Check if need to dump the new writes
   private void waitForDump() {
     if (!enabledDump) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Do nothing, dump is disabled.");
-      }
+      LOG.debug("Do nothing, dump is disabled.");
       return;
     }
 
@@ -286,9 +282,7 @@ class OpenFileCtx {
     // wake up the dumper thread to dump the data
     synchronized (this) {
       if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Asking dumper to dump...");
-        }
+        LOG.debug("Asking dumper to dump...");
         if (dumpThread == null) {
           dumpThread = new Daemon(new Dumper());
           dumpThread.start();
@@ -312,7 +306,7 @@ class OpenFileCtx {
     private void dump() {
       // Create dump outputstream for the first time
       if (dumpOut == null) {
-        LOG.info("Create dump file: " + dumpFilePath);
+        LOG.info("Create dump file: {}", dumpFilePath);
         File dumpFile = new File(dumpFilePath);
         try {
           synchronized (this) {
@@ -322,13 +316,14 @@ class OpenFileCtx {
             dumpOut = new FileOutputStream(dumpFile);
           }
         } catch (IOException e) {
-          LOG.error("Got failure when creating dump stream " + dumpFilePath, e);
+          LOG.error("Got failure when creating dump stream {}",
+              dumpFilePath, e);
           enabledDump = false;
           if (dumpOut != null) {
             try {
               dumpOut.close();
             } catch (IOException e1) {
-              LOG.error("Can't close dump stream " + dumpFilePath, e);
+              LOG.error("Can't close dump stream {}", dumpFilePath, e);
             }
           }
           return;
@@ -340,17 +335,15 @@ class OpenFileCtx {
         try {
           raf = new RandomAccessFile(dumpFilePath, "r");
         } catch (FileNotFoundException e) {
-          LOG.error("Can't get random access to file " + dumpFilePath);
+          LOG.error("Can't get random access to file {}", dumpFilePath);
           // Disable dump
           enabledDump = false;
           return;
         }
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == {}",
+            nonSequentialWriteInMemory.get());
 
       Iterator<OffsetRange> it = pendingWrites.keySet().iterator();
       while (activeState && it.hasNext()
@@ -367,18 +360,16 @@ class OpenFileCtx {
             updateNonSequentialWriteInMemory(-dumpedDataSize);
           }
         } catch (IOException e) {
-          LOG.error("Dump data failed: " + writeCtx + " with error: " + e
-              + " OpenFileCtx state: " + activeState);
+          LOG.error("Dump data failed: {} OpenFileCtx state: {}",
+              writeCtx, activeState, e);
           // Disable dump
           enabledDump = false;
           return;
         }
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("After dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("After dump, nonSequentialWriteInMemory == {}",
+          nonSequentialWriteInMemory.get());
     }
 
     @Override
@@ -393,26 +384,22 @@ class OpenFileCtx {
               OpenFileCtx.this.notifyAll();
               try {
                 OpenFileCtx.this.wait();
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("Dumper woke up");
-                }
+                LOG.debug("Dumper woke up");
               } catch (InterruptedException e) {
-                LOG.info("Dumper is interrupted, dumpFilePath= "
-                    + OpenFileCtx.this.dumpFilePath);
+                LOG.info("Dumper is interrupted, dumpFilePath = {}",
+                    OpenFileCtx.this.dumpFilePath);
               }
             }
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Dumper checking OpenFileCtx activeState: " + activeState
-                + " enabledDump: " + enabledDump);
-          }
+          LOG.debug("Dumper checking OpenFileCtx activeState: {} " +
+              "enabledDump: {}", activeState, enabledDump);
         } catch (Throwable t) {
           // unblock threads with new request
           synchronized (OpenFileCtx.this) {
             OpenFileCtx.this.notifyAll();
           }
-          LOG.info("Dumper get Throwable: " + t + ". dumpFilePath: "
-              + OpenFileCtx.this.dumpFilePath, t);
+          LOG.info("Dumper got Throwable. dumpFilePath: {}",
+              OpenFileCtx.this.dumpFilePath, t);
           activeState = false;
         }
       }
@@ -428,8 +415,8 @@ class OpenFileCtx {
       return null;
     } else {
       if (xid != writeCtx.getXid()) {
-        LOG.warn("Got a repeated request, same range, with a different xid: "
-            + xid + " xid in old request: " + writeCtx.getXid());
+        LOG.warn("Got a repeated request, same range, with a different xid: " +
+            "{} xid in old request: {}", xid, writeCtx.getXid());
         //TODO: better handling.
       }
       return writeCtx;  
@@ -441,8 +428,8 @@ class OpenFileCtx {
       IdMappingServiceProvider iug) {
     
     if (!activeState) {
-      LOG.info("OpenFileCtx is inactive, fileId: "
-          + request.getHandle().dumpFileHandle());
+      LOG.info("OpenFileCtx is inactive, fileId: {}",
+          request.getHandle().dumpFileHandle());
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
@@ -460,15 +447,11 @@ class OpenFileCtx {
           xid);
       if (existantWriteCtx != null) {
         if (!existantWriteCtx.getReplied()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which hasn't been served: xid="
-                + xid + ", drop it.");
-          }
+          LOG.debug("Repeated write request which hasn't been served: " +
+                    "xid={}, drop it.", xid);
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which is already served: xid="
-                + xid + ", resend response.");
-          }
+          LOG.debug("Repeated write request which is already served: xid={}" +
+              ", resend response.", xid);
           WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
           WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
               fileWcc, request.getCount(), request.getStableHow(),
@@ -489,13 +472,11 @@ class OpenFileCtx {
     long offset = request.getOffset();
     int count = request.getCount();
     long smallerCount = offset + count - cachedOffset;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
-          + " current offset %d," + " drop the overlapped section [%d-%d)"
-          + " and append new data [%d-%d).", offset, (offset + count),
-          cachedOffset, offset, cachedOffset, cachedOffset, (offset
-              + count)));
-    }
+    LOG.debug("Got overwrite with appended data [{}-{}),"
+            + " current offset {}," + " drop the overlapped section [{}-{})"
+            + " and append new data [{}-{}).", offset, (offset + count),
+            cachedOffset, offset, cachedOffset, cachedOffset,
+        (offset + count));
     
     ByteBuffer data = request.getData();
     Preconditions.checkState(data.position() == 0,
@@ -538,10 +519,8 @@ class OpenFileCtx {
     long cachedOffset = nextOffset.get();
     int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("requested offset=" + offset + " and current offset="
-          + cachedOffset);
-    }
+    LOG.debug("requested offset={} and current offset={}",
+        offset, cachedOffset);
 
     // Ignore write request with range below the current offset
     if (offset + count <= cachedOffset) {
@@ -576,8 +555,8 @@ class OpenFileCtx {
     
     // Fail non-append call
     if (offset < cachedOffset) {
-      LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
-          + nextOffset + ")");
+      LOG.warn("(offset,count,nextOffset): ({},{},{})",
+          offset, count, nextOffset);
       return null;
     } else {
       DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
@@ -586,10 +565,8 @@ class OpenFileCtx {
           request.getOffset(), request.getCount(), originalCount,
           request.getStableHow(), request.getData(), channel, xid, false,
           dataState);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Add new write to the list with nextOffset " + cachedOffset
-            + " and requested offset=" + offset);
-      }
+      LOG.debug("Add new write to the list with nextOffset {}" +
+          " and requested offset={}", cachedOffset, offset);
       if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
         // update the memory size
         updateNonSequentialWriteInMemory(count);
@@ -598,14 +575,12 @@ class OpenFileCtx {
       WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
       if (oldWriteCtx == null) {
         pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New write buffered with xid " + xid + " nextOffset "
-              + cachedOffset + " req offset=" + offset + " mapsize="
-              + pendingWrites.size());
-        }
+        LOG.debug("New write buffered with xid {} nextOffset {}" +
+            "req offset={} mapsize={}",
+            xid, cachedOffset, offset, pendingWrites.size());
       } else {
-        LOG.warn("Got a repeated request, same range, with xid: " + xid
-            + " nextOffset " + +cachedOffset + " req offset=" + offset);
+        LOG.warn("Got a repeated request, same range, with xid: " +
+            "{} nextOffset {} req offset={}", xid, cachedOffset, offset);
       }
       return writeCtx;
     }
@@ -625,9 +600,7 @@ class OpenFileCtx {
       response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
           WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Process perfectOverWrite");
-      }
+      LOG.debug("Process perfectOverWrite");
       // TODO: let executor handle perfect overwrite
       response = processPerfectOverWrite(dfsClient, offset, count, stableHow,
           request.getData().array(),
@@ -652,17 +625,13 @@ class OpenFileCtx {
     
     if (writeCtx.getOffset() == nextOffset.get()) {
       if (!asyncStatus) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trigger the write back task. Current nextOffset: "
-              + nextOffset.get());
-        }
+        LOG.debug("Trigger the write back task. Current nextOffset: {}",
+            nextOffset.get());
         asyncStatus = true;
         asyncWriteBackStartOffset = writeCtx.getOffset();
         asyncDataService.execute(new AsyncDataService.WriteBackTask(this));
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The write back thread is working.");
-        }
+        LOG.debug("The write back thread is working.");
       }
       return true;
     } else {
@@ -694,15 +663,13 @@ class OpenFileCtx {
         // responses of the previous batch. So here send response immediately
         // for unstable non-sequential write
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Have to change stable write to unstable write: "
-              + request.getStableHow());
+          LOG.info("Have to change stable write to unstable write: {}",
+              request.getStableHow());
           stableHow = WriteStableHow.UNSTABLE;
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("UNSTABLE write request, send response for offset: "
-              + writeCtx.getOffset());
-        }
+        LOG.debug("UNSTABLE write request, send response for offset: {}",
+            writeCtx.getOffset());
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
@@ -738,8 +705,8 @@ class OpenFileCtx {
       LOG.info("The FSDataOutputStream has been closed. "
           + "Continue processing the perfect overwrite.");
     } catch (IOException e) {
-      LOG.info("hsync failed when processing possible perfect overwrite, path="
-          + path + " error: " + e);
+      LOG.info("hsync failed when processing possible perfect overwrite, " +
+              "path={} error: {}", path, e.toString());
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
     }
@@ -748,18 +715,18 @@ class OpenFileCtx {
       fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
       readCount = fis.read(offset, readbuffer, 0, count);
       if (readCount < count) {
-        LOG.error("Can't read back " + count + " bytes, partial read size: "
-            + readCount);
+        LOG.error("Can't read back {} bytes, partial read size: {}",
+            count, readCount);
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
     } catch (IOException e) {
-      LOG.info("Read failed when processing possible perfect overwrite, path="
-          + path, e);
+      LOG.info("Read failed when processing possible perfect overwrite, " +
+              "path={}", path, e);
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
     } finally {
-      IOUtils.cleanup(LOG, fis);
+      IOUtils.cleanupWithLogger(LOG, fis);
     }
 
     // Compare with the request
@@ -776,8 +743,8 @@ class OpenFileCtx {
         dfsClient.setTimes(path, Time.monotonicNow(), -1);
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
       } catch (IOException e) {
-        LOG.info("Got error when processing perfect overwrite, path=" + path
-            + " error: " + e);
+        LOG.info("Got error when processing perfect overwrite, path={} " +
+            "error: {}", path, e.toString());
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
@@ -810,9 +777,7 @@ class OpenFileCtx {
 
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
         preOpAttr, fromRead);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got commit status: " + ret.name());
-    }
+    LOG.debug("Got commit status: {}", ret.name());
     // Do the sync outside the lock
     if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
         || ret == COMMIT_STATUS.COMMIT_FINISHED) {
@@ -828,7 +793,7 @@ class OpenFileCtx {
           ret = COMMIT_STATUS.COMMIT_ERROR;
         }
       } catch (IOException e) {
-        LOG.error("Got stream error during data sync: " + e);
+        LOG.error("Got stream error during data sync", e);
         // Do nothing. Stream will be closed eventually by StreamMonitor.
         // status = Nfs3Status.NFS3ERR_IO;
         ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -867,9 +832,7 @@ class OpenFileCtx {
       CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
       pendingCommits.put(commitOffset, commitCtx);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("return COMMIT_SPECIAL_WAIT");
-    }
+    LOG.debug("return COMMIT_SPECIAL_WAIT");
     return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
   }
   
@@ -886,10 +849,8 @@ class OpenFileCtx {
     }
     
     long flushed = getFlushedOffset();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset
-          + "nextOffset=" + nextOffset.get());
-    }
+    LOG.debug("getFlushedOffset={} commitOffset={} nextOffset={}",
+        flushed, commitOffset, nextOffset.get());
     
     if (pendingWrites.isEmpty()) {
       if (aixCompatMode) {
@@ -898,10 +859,8 @@ class OpenFileCtx {
         return COMMIT_STATUS.COMMIT_FINISHED;
       } else {
         if (flushed < nextOffset.get()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("get commit while still writing to the requested offset,"
-                + " with empty queue");
-          }
+          LOG.debug("get commit while still writing to the requested offset,"
+              + " with empty queue");
           return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
               preOpAttr);
         } else {
@@ -920,18 +879,14 @@ class OpenFileCtx {
       if (co <= flushed) {
         return COMMIT_STATUS.COMMIT_DO_SYNC;
       } else if (co < nextOffset.get()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("get commit while still writing to the requested offset");
-        }
+        LOG.debug("get commit while still writing to the requested offset");
         return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
       } else {
         // co >= nextOffset
         if (checkSequential(co, nextOffset.get())) {
           return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("return COMMIT_SPECIAL_SUCCESS");
-          }
+          LOG.debug("return COMMIT_SPECIAL_SUCCESS");
           return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
         }
       }
@@ -993,8 +948,8 @@ class OpenFileCtx {
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("stream can be closed for fileId: "
-            + handle.dumpFileHandle());
+        LOG.debug("stream can be closed for fileId: {}",
+            handle.dumpFileHandle());
       }
       flag = true;
     }
@@ -1009,10 +964,8 @@ class OpenFileCtx {
    */
   private synchronized WriteCtx offerNextToWrite() {
     if (pendingWrites.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The async write task has no pending writes, fileId: "
-            + latestAttr.getFileId());
-      }
+      LOG.debug("The async write task has no pending writes, fileId: {}",
+          latestAttr.getFileId());
       // process pending commit again to handle this race: a commit is added
       // to pendingCommits map just after the last doSingleWrite returns.
       // There is no pending write and the commit should be handled by the
@@ -1029,49 +982,35 @@ class OpenFileCtx {
     OffsetRange range = lastEntry.getKey();
     WriteCtx toWrite = lastEntry.getValue();
 
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("range.getMin()=" + range.getMin() + " nextOffset="
-          + nextOffset);
-    }
+    LOG.trace("range.getMin()={} nextOffset={}",
+        range.getMin(), nextOffset);
 
     long offset = nextOffset.get();
     if (range.getMin() > offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The next sequential write has not arrived yet");
-      }
+      LOG.debug("The next sequential write has not arrived yet");
       processCommits(nextOffset.get()); // handle race
       this.asyncStatus = false;
     } else if (range.getMax() <= offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " which is already written from the list");
-      }
+      LOG.debug("Remove write {} which is already written from the list",
+          range);
       // remove the WriteCtx from cache
       pendingWrites.remove(range);
     } else if (range.getMin() < offset && range.getMax() > offset) {
-      LOG.warn("Got an overlapping write " + range.toString()
-          + ", nextOffset=" + offset
-          + ". Remove and trim it");
+      LOG.warn("Got an overlapping write {}, nextOffset={}. " +
+          "Remove and trim it", range, offset);
       pendingWrites.remove(range);
       trimWriteRequest(toWrite, offset);
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset (after trim) to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset (after trim) to {}", nextOffset.get());
       return toWrite;
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " from the list");
-      }
+      LOG.debug("Remove write {} from the list", range);
       // after writing, remove the WriteCtx from cache
       pendingWrites.remove(range);
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset to {}", nextOffset.get());
       return toWrite;
     }
     return null;
@@ -1095,9 +1034,9 @@ class OpenFileCtx {
         }
       }
       
-      if (!activeState && LOG.isDebugEnabled()) {
-        LOG.debug("The openFileCtx is not active anymore, fileId: "
-            + latestAttr.getFileId());
+      if (!activeState) {
+        LOG.debug("The openFileCtx is not active anymore, fileId: {}",
+            latestAttr.getFileId());
       }
     } finally {
       // Make sure to reset asyncStatus to false unless a race happens
@@ -1105,11 +1044,12 @@ class OpenFileCtx {
         if (startOffset == asyncWriteBackStartOffset) {
           asyncStatus = false;
         } else {
-          LOG.info("Another async task is already started before this one"
-              + " is finalized. fileId: " + latestAttr.getFileId()
-              + " asyncStatus: " + asyncStatus + " original startOffset: "
-              + startOffset + " new startOffset: " + asyncWriteBackStartOffset
-              + ". Won't change asyncStatus here.");
+          LOG.info("Another async task is already started before this one " +
+                  "is finalized. fileId: {} asyncStatus: {} " +
+                  "original startOffset: {} " +
+                  "new startOffset: {}. Won't change asyncStatus here.",
+              latestAttr.getFileId(), asyncStatus,
+              startOffset, asyncWriteBackStartOffset);
         }
       }
     }
@@ -1132,8 +1072,8 @@ class OpenFileCtx {
       status = Nfs3Status.NFS3_OK;
     } catch (ClosedChannelException cce) {
       if (!pendingWrites.isEmpty()) {
-        LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
-            + ". Channel closed with writes pending.", cce);
+        LOG.error("Can't sync for fileId: {}. " +
+            "Channel closed with writes pending", latestAttr.getFileId(), cce);
       }
       status = Nfs3Status.NFS3ERR_IO;
     } catch (IOException e) {
@@ -1152,8 +1092,8 @@ class OpenFileCtx {
     }
 
     if (latestAttr.getSize() != offset) {
-      LOG.error("After sync, the expect file size: " + offset
-          + ", however actual file size is: " + latestAttr.getSize());
+      LOG.error("After sync, the expect file size: {}, " +
+          "however actual file size is: {}", offset, latestAttr.getSize());
       status = Nfs3Status.NFS3ERR_IO;
     }
     WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
@@ -1170,11 +1110,11 @@ class OpenFileCtx {
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
           .serialize(new XDR(), commit.getXid(),
               new VerifierNone()), commit.getXid());
-      
+
       if (LOG.isDebugEnabled()) {
-        LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
-            + Nfs3Utils.getElapsedTime(commit.startTime)
-            + "ns. Sent response for commit: " + commit);
+        LOG.debug("FileId: {} Service time: {}ns. " +
+                "Sent response for commit: {}", latestAttr.getFileId(),
+            Nfs3Utils.getElapsedTime(commit.startTime), commit);
       }
       entry = pendingCommits.firstEntry();
     }
@@ -1190,8 +1130,8 @@ class OpenFileCtx {
     
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("do write, fileHandle " + handle.dumpFileHandle() + " offset: "
-          + offset + " length: " + count + " stableHow: " + stableHow.name());
+      LOG.debug("do write, fileHandle {} offset: {} length: {} stableHow: {}",
+          handle.dumpFileHandle(), offset, count, stableHow.name());
     }
 
     try {
@@ -1215,10 +1155,10 @@ class OpenFileCtx {
             writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
             updateNonSequentialWriteInMemory(-count);
             if (LOG.isDebugEnabled()) {
-              LOG.debug("After writing " + handle.dumpFileHandle()
-                  + " at offset " + offset
-                  + ", updated the memory count, new value: "
-                  + nonSequentialWriteInMemory.get());
+              LOG.debug("After writing {} at offset {}, " +
+                      "updated the memory count, new value: {}",
+                  handle.dumpFileHandle(), offset,
+                  nonSequentialWriteInMemory.get());
             }
           }
         }
@@ -1226,7 +1166,7 @@ class OpenFileCtx {
       
       if (!writeCtx.getReplied()) {
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Do sync for stable write: " + writeCtx);
+          LOG.info("Do sync for stable write: {}", writeCtx);
           try {
             if (stableHow == WriteStableHow.DATA_SYNC) {
               fos.hsync();
@@ -1237,7 +1177,7 @@ class OpenFileCtx {
               fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
             }
           } catch (IOException e) {
-            LOG.error("hsync failed with writeCtx: " + writeCtx, e);
+            LOG.error("hsync failed with writeCtx: {}", writeCtx, e);
             throw e;
           }
         }
@@ -1245,8 +1185,8 @@ class OpenFileCtx {
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
-          LOG.warn("Return original count: " + writeCtx.getOriginalCount()
-              + " instead of real data count: " + count);
+          LOG.warn("Return original count: {} instead of real data count: {}",
+              writeCtx.getOriginalCount(), count);
           count = writeCtx.getOriginalCount();
         }
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1260,8 +1200,8 @@ class OpenFileCtx {
       processCommits(writeCtx.getOffset() + writeCtx.getCount());
      
     } catch (IOException e) {
-      LOG.error("Error writing to fileHandle " + handle.dumpFileHandle()
-          + " at offset " + offset + " and length " + count, e);
+      LOG.error("Error writing to fileHandle {} at offset {} and length {}",
+          handle.dumpFileHandle(), offset, count, e);
       if (!writeCtx.getReplied()) {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
         Nfs3Utils.writeChannel(channel, response.serialize(
@@ -1269,8 +1209,8 @@ class OpenFileCtx {
         // Keep stream open. Either client retries or SteamMonitor closes it.
       }
 
-      LOG.info("Clean up open file context for fileId: "
-          + latestAttr.getFileId());
+      LOG.info("Clean up open file context for fileId: {}",
+          latestAttr.getFileId());
       cleanup();
     }
   }
@@ -1297,17 +1237,16 @@ class OpenFileCtx {
         fos.close();
       }
     } catch (IOException e) {
-      LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
-          + ", error: " + e);
+      LOG.info("Can't close stream for fileId: {}, error: {}",
+          latestAttr.getFileId(), e.toString());
     }
     
     // Reply error for pending writes
-    LOG.info("There are " + pendingWrites.size() + " pending writes.");
+    LOG.info("There are {} pending writes.", pendingWrites.size());
     WccAttr preOpAttr = latestAttr.getWccAttr();
     while (!pendingWrites.isEmpty()) {
       OffsetRange key = pendingWrites.firstKey();
-      LOG.info("Fail pending write: " + key.toString()
-          + ", nextOffset=" + nextOffset.get());
+      LOG.info("Fail pending write: {}, nextOffset={}", key, nextOffset.get());
       
       WriteCtx writeCtx = pendingWrites.remove(key);
       if (!writeCtx.getReplied()) {
@@ -1325,11 +1264,12 @@ class OpenFileCtx {
       try {
         dumpOut.close();
       } catch (IOException e) {
-        LOG.error("Failed to close outputstream of dump file" + dumpFilePath, e);
+        LOG.error("Failed to close outputstream of dump file {}",
+            dumpFilePath, e);
       }
       File dumpFile = new File(dumpFilePath);
       if (dumpFile.exists() && !dumpFile.delete()) {
-        LOG.error("Failed to delete dumpfile: " + dumpFile);
+        LOG.error("Failed to delete dumpfile: {}", dumpFile);
       }
     }
     if (raf != null) {

File diff suppressed because it is too large
+ 154 - 187
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java


+ 92 - 119
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -477,7 +477,7 @@ public class DataNode extends ReconfigurableBase
               HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
       String reason = DomainSocket.getLoadingFailureReason();
       if (reason != null) {
-        LOG.warn("File descriptor passing is disabled because " + reason);
+        LOG.warn("File descriptor passing is disabled because {}", reason);
         this.fileDescriptorPassingDisabledReason = reason;
       } else {
         LOG.info("File descriptor passing is enabled.");
@@ -493,7 +493,7 @@ public class DataNode extends ReconfigurableBase
 
     try {
       hostName = getHostName(conf);
-      LOG.info("Configured hostname is " + hostName);
+      LOG.info("Configured hostname is {}", hostName);
       startDataNode(dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -533,7 +533,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_DATA_DIR_KEY: {
         IOException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           this.refreshVolumes(newVal);
           return getConf().get(DFS_DATANODE_DATA_DIR_KEY);
         } catch (IOException e) {
@@ -545,7 +545,7 @@ public class DataNode extends ReconfigurableBase
                 new BlockReportOptions.Factory().setIncremental(false).build());
           } catch (IOException e) {
             LOG.warn("Exception while sending the block report after refreshing"
-                + " volumes " + property + " to " + newVal, e);
+                + " volumes {} to {}", property, newVal, e);
             if (rootException == null) {
               rootException = e;
             }
@@ -561,7 +561,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY: {
         ReconfigurationException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           int movers;
           if (newVal == null) {
             // set to default
@@ -696,8 +696,8 @@ public class DataNode extends ReconfigurableBase
       // New conf doesn't have the storage location which available in
       // the current storage locations. Add to the deactivateLocations list.
       if (!found) {
-        LOG.info("Deactivation request received for active volume: "
-            + dir.getRoot().toString());
+        LOG.info("Deactivation request received for active volume: {}",
+            dir.getRoot());
         results.deactivateLocations.add(
             StorageLocation.parse(dir.getRoot().toString()));
       }
@@ -724,8 +724,8 @@ public class DataNode extends ReconfigurableBase
         // New conf doesn't have this failed storage location.
         // Add to the deactivate locations list.
         if (!found) {
-          LOG.info("Deactivation request received for failed volume: "
-              + failedStorageLocation);
+          LOG.info("Deactivation request received for failed volume: {}",
+              failedStorageLocation);
           results.deactivateLocations.add(StorageLocation.parse(
               failedStorageLocation));
         }
@@ -760,7 +760,7 @@ public class DataNode extends ReconfigurableBase
         throw new IOException("Attempt to remove all volumes.");
       }
       if (!changedVolumes.newLocations.isEmpty()) {
-        LOG.info("Adding new volumes: " +
+        LOG.info("Adding new volumes: {}",
             Joiner.on(",").join(changedVolumes.newLocations));
 
         // Add volumes for each Namespace
@@ -794,16 +794,16 @@ public class DataNode extends ReconfigurableBase
               errorMessageBuilder.append(
                   String.format("FAILED TO ADD: %s: %s%n",
                   volume, ioe.getMessage()));
-              LOG.error("Failed to add volume: " + volume, ioe);
+              LOG.error("Failed to add volume: {}", volume, ioe);
             } else {
               effectiveVolumes.add(volume.toString());
-              LOG.info("Successfully added volume: " + volume);
+              LOG.info("Successfully added volume: {}", volume);
             }
           } catch (Exception e) {
             errorMessageBuilder.append(
                 String.format("FAILED to ADD: %s: %s%n", volume,
                               e.toString()));
-            LOG.error("Failed to add volume: " + volume, e);
+            LOG.error("Failed to add volume: {}", volume, e);
           }
         }
       }
@@ -812,7 +812,7 @@ public class DataNode extends ReconfigurableBase
         removeVolumes(changedVolumes.deactivateLocations);
       } catch (IOException e) {
         errorMessageBuilder.append(e.getMessage());
-        LOG.error("Failed to remove volume: " + e.getMessage(), e);
+        LOG.error("Failed to remove volume", e);
       }
 
       if (errorMessageBuilder.length() > 0) {
@@ -967,16 +967,17 @@ public class DataNode extends ReconfigurableBase
           ServicePlugin.class);
     } catch (RuntimeException e) {
       String pluginsValue = conf.get(DFS_DATANODE_PLUGINS_KEY);
-      LOG.error("Unable to load DataNode plugins. Specified list of plugins: " +
+      LOG.error("Unable to load DataNode plugins. " +
+              "Specified list of plugins: {}",
           pluginsValue, e);
       throw e;
     }
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
-        LOG.info("Started plug-in " + p);
+        LOG.info("Started plug-in {}", p);
       } catch (Throwable t) {
-        LOG.warn("ServicePlugin " + p + " could not be started", t);
+        LOG.warn("ServicePlugin {} could not be started", p, t);
       }
     }
   }
@@ -1026,7 +1027,7 @@ public class DataNode extends ReconfigurableBase
         traceAdminService,
         ipcServer);
 
-    LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
+    LOG.info("Opened IPC server at {}", ipcServer.getListenerAddress());
 
     // set service-level authorization security policy
     if (getConf().getBoolean(
@@ -1085,8 +1086,9 @@ public class DataNode extends ReconfigurableBase
       directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan is disabled because " +
-                   reason);
+      LOG.info("Periodic Directory Tree Verification scan " +
+              "is disabled because {}",
+          reason);
     }
   }
   
@@ -1139,7 +1141,7 @@ public class DataNode extends ReconfigurableBase
           dnConf.getTransferSocketRecvBufferSize());
     }
     streamingAddr = tcpPeerServer.getStreamingAddr();
-    LOG.info("Opened streaming server at " + streamingAddr);
+    LOG.info("Opened streaming server at {}", streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     xserver = new DataXceiverServer(tcpPeerServer, getConf(), this);
     this.dataXceiverServer = new Daemon(threadGroup, xserver);
@@ -1157,7 +1159,7 @@ public class DataNode extends ReconfigurableBase
       if (domainPeerServer != null) {
         this.localDataXceiverServer = new Daemon(threadGroup,
             new DataXceiverServer(domainPeerServer, getConf(), this));
-        LOG.info("Listening on UNIX domain socket: " +
+        LOG.info("Listening on UNIX domain socket: {}",
             domainPeerServer.getBindPath());
       }
     }
@@ -1175,7 +1177,7 @@ public class DataNode extends ReconfigurableBase
          (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
         LOG.warn("Although short-circuit local reads are configured, " +
-            "they are disabled because you didn't configure " +
+            "they are disabled because you didn't configure {}",
             DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
       }
       return null;
@@ -1205,8 +1207,8 @@ public class DataNode extends ReconfigurableBase
       bpos.notifyNamenodeReceivedBlock(block, delHint, storageUuid,
           isOnTransientStorage);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block received for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block received " +
+              "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1217,8 +1219,8 @@ public class DataNode extends ReconfigurableBase
     if(bpos != null) {
       bpos.notifyNamenodeReceivingBlock(block, storageUuid);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block receiving for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block receiving " +
+          "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1239,7 +1241,7 @@ public class DataNode extends ReconfigurableBase
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     reportBadBlocks(block, volume);
@@ -1382,7 +1384,7 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-    LOG.info("Starting DataNode with maxLockedMemory = " +
+    LOG.info("Starting DataNode with maxLockedMemory = {}",
         dnConf.maxLockedMemory);
 
     int volFailuresTolerated = dnConf.getVolFailuresTolerated();
@@ -1409,8 +1411,8 @@ public class DataNode extends ReconfigurableBase
 
     // Login is done by now. Set the DN user name.
     dnUserName = UserGroupInformation.getCurrentUser().getUserName();
-    LOG.info("dnUserName = " + dnUserName);
-    LOG.info("supergroup = " + supergroup);
+    LOG.info("dnUserName = {}", dnUserName);
+    LOG.info("supergroup = {}", supergroup);
     initIpcServer();
 
     metrics = DataNodeMetrics.create(getConf(), getDisplayName());
@@ -1514,8 +1516,8 @@ public class DataNode extends ReconfigurableBase
     if (storage.getDatanodeUuid() == null) {
       storage.setDatanodeUuid(generateUuid());
       storage.writeAll();
-      LOG.info("Generated and persisted new Datanode UUID " +
-               storage.getDatanodeUuid());
+      LOG.info("Generated and persisted new Datanode UUID {}",
+          storage.getDatanodeUuid());
     }
   }
 
@@ -1583,11 +1585,11 @@ public class DataNode extends ReconfigurableBase
     if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
       long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
       long blockTokenLifetime = keys.getTokenLifetime();
-      LOG.info("Block token params received from NN: for block pool " +
-          blockPoolId + " keyUpdateInterval="
-          + blockKeyUpdateInterval / (60 * 1000)
-          + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
-          + " min(s)");
+      LOG.info("Block token params received from NN: " +
+          "for block pool {} keyUpdateInterval={} min(s), " +
+          "tokenLifetime={} min(s)",
+          blockPoolId, blockKeyUpdateInterval / (60 * 1000),
+          blockTokenLifetime / (60 * 1000));
       final boolean enableProtobuf = getConf().getBoolean(
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
@@ -1690,9 +1692,10 @@ public class DataNode extends ReconfigurableBase
         storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
       }
       final StorageInfo bpStorage = storage.getBPStorage(bpid);
-      LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
-          + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
-          + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
+      LOG.info("Setting up storage: nsid={};bpid={};lv={};" +
+              "nsInfo={};dnuuid={}",
+          bpStorage.getNamespaceID(), bpid, storage.getLayoutVersion(),
+          nsInfo, storage.getDatanodeUuid());
     }
 
     // If this is a newly formatted DataNode then assign a new DatanodeUuid.
@@ -1802,9 +1805,8 @@ public class DataNode extends ReconfigurableBase
       final boolean connectToDnViaHostname) throws IOException {
     final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
     final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
-    }
+    LOG.debug("Connecting to datanode {} addr={}",
+        dnAddr, addr);
     final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
       return loginUgi
@@ -1868,20 +1870,15 @@ public class DataNode extends ReconfigurableBase
     checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
     Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
-    if (LOG.isDebugEnabled()) {
-      if (info != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo successful block=" + block
-              + " blockfile " + info.getBlockPath() + " metafile "
-              + info.getMetaPath());
-        }
-      } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo for block=" + block
-              + " returning null");
-        }
-      }
+    if (info != null) {
+      LOG.trace("getBlockLocalPathInfo successful " +
+          "block={} blockfile {} metafile {}",
+          block, info.getBlockPath(), info.getMetaPath());
+    } else {
+      LOG.trace("getBlockLocalPathInfo for block={} " +
+          "returning null", block);
     }
+
     metrics.incrBlocksGetLocalPathInfo();
     return info;
   }
@@ -1939,9 +1936,7 @@ public class DataNode extends ReconfigurableBase
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got: " + id.toString());
-      }
+      LOG.debug("Got: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
           null, null);
     }
@@ -1959,9 +1954,9 @@ public class DataNode extends ReconfigurableBase
       for (ServicePlugin p : plugins) {
         try {
           p.stop();
-          LOG.info("Stopped plug-in " + p);
+          LOG.info("Stopped plug-in {}", p);
         } catch (Throwable t) {
-          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
+          LOG.warn("ServicePlugin {} could not be stopped", p, t);
         }
       }
     }
@@ -1984,7 +1979,7 @@ public class DataNode extends ReconfigurableBase
         this.dataXceiverServer.interrupt();
       } catch (Exception e) {
         // Ignore, since the out of band messaging is advisory.
-        LOG.trace("Exception interrupting DataXceiverServer: ", e);
+        LOG.trace("Exception interrupting DataXceiverServer", e);
       }
     }
 
@@ -2038,7 +2033,7 @@ public class DataNode extends ReconfigurableBase
           this.threadGroup.interrupt();
           break;
         }
-        LOG.info("Waiting for threadgroup to exit, active threads is " +
+        LOG.info("Waiting for threadgroup to exit, active threads is {}",
                  this.threadGroup.activeCount());
         if (this.threadGroup.activeCount() == 0) {
           break;
@@ -2085,7 +2080,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.blockPoolManager.shutDownAll(bposArray);
       } catch (InterruptedException ie) {
-        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
+        LOG.warn("Received exception in BlockPoolManager#shutDownAll", ie);
       }
     }
     
@@ -2093,7 +2088,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.storage.unlockAll();
       } catch (IOException ie) {
-        LOG.warn("Exception when unlocking storage: " + ie, ie);
+        LOG.warn("Exception when unlocking storage", ie);
       }
     }
     if (data != null) {
@@ -2140,8 +2135,8 @@ public class DataNode extends ReconfigurableBase
 
   private void handleDiskError(String failedVolumes) {
     final boolean hasEnoughResources = data.hasEnoughResource();
-    LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
-        "] Keep Running: " + hasEnoughResources);
+    LOG.warn("DataNode.handleDiskError on: " +
+        "[{}] Keep Running: {}", failedVolumes, hasEnoughResources);
     
     // If we have enough active valid volumes then we do not want to 
     // shutdown the DN completely.
@@ -2438,15 +2433,13 @@ public class DataNode extends ReconfigurableBase
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
-            + b + " (numBytes=" + b.getNumBytes() + ")"
-            + ", stage=" + stage
-            + ", clientname=" + clientname
-            + ", targets=" + Arrays.asList(targets)
-            + ", target storage types=" + (targetStorageTypes == null ? "[]" :
-            Arrays.asList(targetStorageTypes))
-            + ", target storage IDs=" + (targetStorageIds == null ? "[]" :
-            Arrays.asList(targetStorageIds)));
+        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+                "clientname={}, targets={}, target storage types={}, " +
+                "target storage IDs={}", getClass().getSimpleName(), b,
+            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+            targetStorageTypes == null ? "[]" :
+                Arrays.asList(targetStorageTypes),
+            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
       }
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
@@ -2475,9 +2468,7 @@ public class DataNode extends ReconfigurableBase
       try {
         final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
         InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
@@ -2521,17 +2512,15 @@ public class DataNode extends ReconfigurableBase
         blockSender.sendBlock(out, unbufOut, null);
 
         // no response necessary
-        LOG.info(getClass().getSimpleName() + ", at "
-            + DataNode.this.getDisplayName() + ": Transmitted " + b
-            + " (numBytes=" + b.getNumBytes() + ") to " + curTarget);
+        LOG.info("{}, at {}: Transmitted {} (numBytes={}) to {}",
+            getClass().getSimpleName(), DataNode.this.getDisplayName(),
+            b, b.getNumBytes(), curTarget);
 
         // read ack
         if (isClient) {
           DNTransferAckProto closeAck = DNTransferAckProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck);
-          }
+          LOG.debug("{}: close-ack={}", getClass().getSimpleName(), closeAck);
           if (closeAck.getStatus() != Status.SUCCESS) {
             if (closeAck.getStatus() == Status.ERROR_ACCESS_TOKEN) {
               throw new InvalidBlockTokenException(
@@ -2550,17 +2539,11 @@ public class DataNode extends ReconfigurableBase
           // Add the block to the front of the scanning queue if metadata file
           // is corrupt. We already add the block to front of scanner if the
           // peer disconnects.
-          LOG.info("Adding block: " + b + " for scanning");
+          LOG.info("Adding block: {} for scanning", b);
           blockScanner.markSuspectBlock(data.getVolume(b).getStorageID(), b);
         }
-        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
-            targets[0] + " got ", ie);
-        // disk check moved to FileIoProvider
-        IOException cause = DatanodeUtil.getCauseIfDiskError(ie);
-        if (cause != null) { // possible disk error
-          LOG.warn("IOException in DataTransfer#run() "+ ie.getMessage() +". "
-                  + "Cause is ", cause);
-        }
+        LOG.warn("{}:Failed to transfer {} to {} got",
+            bpReg, b, targets[0], ie);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -2691,14 +2674,9 @@ public class DataNode extends ReconfigurableBase
       final StorageLocation location;
       try {
         location = StorageLocation.parse(locationString);
-      } catch (IOException ioe) {
-        LOG.error("Failed to initialize storage directory " + locationString
-            + ". Exception details: " + ioe);
-        // Ignore the exception.
-        continue;
-      } catch (SecurityException se) {
-        LOG.error("Failed to initialize storage directory " + locationString
-                     + ". Exception details: " + se);
+      } catch (IOException | SecurityException ioe) {
+        LOG.error("Failed to initialize storage directory {}." +
+            "Exception details: {}", locationString, ioe.toString());
         // Ignore the exception.
         continue;
       }
@@ -2745,7 +2723,7 @@ public class DataNode extends ReconfigurableBase
           wait(2000);
         }
       } catch (InterruptedException ex) {
-        LOG.warn("Received exception in Datanode#join: " + ex);
+        LOG.warn("Received exception in Datanode#join: {}", ex.toString());
       }
     }
   }
@@ -2950,9 +2928,7 @@ public class DataNode extends ReconfigurableBase
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got: " + id.toString());
-        }
+        LOG.debug("Got: {}", id);
         blockPoolTokenSecretManager.checkAccess(id, null, block,
             BlockTokenIdentifier.AccessMode.READ, null, null);
       }
@@ -3165,11 +3141,11 @@ public class DataNode extends ReconfigurableBase
   public void deleteBlockPool(String blockPoolId, boolean force)
       throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("deleteBlockPool command received for block pool " + blockPoolId
-        + ", force=" + force);
+    LOG.info("deleteBlockPool command received for block pool {}, " +
+        "force={}", blockPoolId, force);
     if (blockPoolManager.get(blockPoolId) != null) {
-      LOG.warn("The block pool "+blockPoolId+
-          " is still running, cannot be deleted.");
+      LOG.warn("The block pool {} is still running, cannot be deleted.",
+          blockPoolId);
       throw new IOException(
           "The block pool is still running. First do a refreshNamenodes to " +
           "shutdown the block pool service");
@@ -3181,8 +3157,8 @@ public class DataNode extends ReconfigurableBase
   @Override // ClientDatanodeProtocol
   public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
-        "). Shutting down Datanode...");
+    LOG.info("shutdownDatanode command received (upgrade={}). " +
+        "Shutting down Datanode...", forUpgrade);
 
     // Shutdown can be called only once.
     if (shutdownInProgress) {
@@ -3381,12 +3357,9 @@ public class DataNode extends ReconfigurableBase
       // Remove all unhealthy volumes from DataNode.
       removeVolumes(unhealthyLocations, false);
     } catch (IOException e) {
-      LOG.warn("Error occurred when removing unhealthy storage dirs: "
-          + e.getMessage(), e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(sb.toString());
+      LOG.warn("Error occurred when removing unhealthy storage dirs", e);
     }
+    LOG.debug("{}", sb);
       // send blockreport regarding volume failure
     handleDiskError(sb.toString());
   }
@@ -3568,7 +3541,7 @@ public class DataNode extends ReconfigurableBase
     case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
       return Long.toString(this.diskBalancer.getBandwidth());
     default:
-      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
+      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: {}",
           key);
       throw new DiskBalancerException("Unknown key",
           DiskBalancerException.Result.UNKNOWN_KEY);
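
For illustration only (not part of this patch): a minimal, self-contained sketch of the SLF4J overload the DataNode.java changes above rely on. The class and method names here are made up; the two calls mirror the before/after form of the volume-add logging. Since SLF4J 1.6, a trailing exception with no matching placeholder is treated as the Throwable, so the stack trace is still logged after the conversion to {} placeholders.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingPatternSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingPatternSketch.class);

  void addVolume(String volume, Exception e) {
    // Before: the message is built eagerly by string concatenation.
    LOG.error("Failed to add volume: " + volume, e);
    // After: {} placeholder; the trailing exception is still recognized as
    // the Throwable, so the full stack trace is logged exactly as before.
    LOG.error("Failed to add volume: {}", volume, e);
  }
}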

+ 79 - 93
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -148,10 +148,8 @@ class DataXceiver extends Receiver implements Runnable {
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of active connections is: "
-          + datanode.getXceiverCount());
-    }
+    LOG.debug("Number of active connections is: {}",
+        datanode.getXceiverCount());
   }
 
   /**
@@ -187,7 +185,7 @@ class DataXceiver extends Receiver implements Runnable {
     // This doesn't need to be in a critical section. Althogh the client
     // can resue the connection to issue a different request, trying sending
     // an OOB through the recently closed block receiver is harmless.
-    LOG.info("Sending OOB to peer: " + peer);
+    LOG.info("Sending OOB to peer: {}", peer);
     br.sendOOB();
   }
 
@@ -199,7 +197,7 @@ class DataXceiver extends Receiver implements Runnable {
       }
       xceiver.interrupt();
     }
-    LOG.info("Stopped the writer: " + peer);
+    LOG.info("Stopped the writer: {}", peer);
   }
 
   /**
@@ -239,14 +237,15 @@ class DataXceiver extends Receiver implements Runnable {
       } catch (InvalidMagicNumberException imne) {
         if (imne.isHandshake4Encryption()) {
           LOG.info("Failed to read expected encryption handshake from client " +
-              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+              "at {}. Perhaps the client " +
               "is running an older version of Hadoop which does not support " +
-              "encryption", imne);
+              "encryption", peer.getRemoteAddressString(), imne);
         } else {
           LOG.info("Failed to read expected SASL data transfer protection " +
-              "handshake from client at " + peer.getRemoteAddressString() + 
+              "handshake from client at {}" +
               ". Perhaps the client is running an older version of Hadoop " +
-              "which does not support SASL data transfer protection", imne);
+              "which does not support SASL data transfer protection",
+              peer.getRemoteAddressString(), imne);
         }
         return;
       }
@@ -302,7 +301,7 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s, t);
         } else {
-          LOG.info(s + "; " + t);
+          LOG.info("{}; {}", s, t.toString());
         }
       } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
         String s1 =
@@ -311,23 +310,19 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s1, t);
         } else {
-          LOG.info(s1 + "; " + t);          
+          LOG.info("{}; {}", s1, t.toString());
         }
       } else if (t instanceof InvalidToken) {
         // The InvalidToken exception has already been logged in
         // checkAccess() method and this is not a server error.
-        if (LOG.isTraceEnabled()) {
-          LOG.trace(s, t);
-        }
+        LOG.trace(s, t);
       } else {
         LOG.error(s, t);
       }
     } finally {
       collectThreadLocalStates();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
-            + datanode.getXceiverCount());
-      }
+      LOG.debug("{}:Number of active connections is: {}",
+          datanode.getDisplayName(), datanode.getXceiverCount());
       updateCurrentThreadName("Cleaning up");
       if (peer != null) {
         dataXceiverServer.closePeer(peer);
@@ -405,21 +400,22 @@ class DataXceiver extends Receiver implements Runnable {
         DomainSocket sock = peer.getDomainSocket();
         sock.sendFileDescriptors(fds, buf, 0, buf.length);
         if (supportsReceiptVerification) {
-          LOG.trace("Reading receipt verification byte for " + slotId);
+          LOG.trace("Reading receipt verification byte for {}", slotId);
           int val = sock.getInputStream().read();
           if (val < 0) {
             throw new EOFException();
           }
         } else {
-          LOG.trace("Receipt verification is not enabled on the DataNode.  " +
-                    "Not verifying " + slotId);
+          LOG.trace("Receipt verification is not enabled on the DataNode. " +
+                    "Not verifying {}", slotId);
         }
         success = true;
       }
     } finally {
       if ((!success) && (registeredSlotId != null)) {
-        LOG.info("Unregistering " + registeredSlotId + " because the " +
-            "requestShortCircuitFdsForRead operation failed.");
+        LOG.info("Unregistering {} because the " +
+            "requestShortCircuitFdsForRead operation failed.",
+            registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
       if (ClientTraceLog.isInfoEnabled()) {
@@ -547,8 +543,8 @@ class DataXceiver extends Receiver implements Runnable {
         // We don't want to close the socket here, since that might lead to
         // bad behavior inside the poll() call.  See HADOOP-11802 for details.
         try {
-          LOG.warn("Failed to send success response back to the client.  " +
-              "Shutting down socket for " + shmInfo.getShmId() + ".");
+          LOG.warn("Failed to send success response back to the client. " +
+              "Shutting down socket for {}", shmInfo.getShmId());
           sock.shutdown();
         } catch (IOException e) {
           LOG.warn("Failed to shut down socket in error handler", e);
@@ -616,9 +612,9 @@ class DataXceiver extends Receiver implements Runnable {
           ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
           if (!stat.hasStatus()) {
-            LOG.warn("Client " + peer.getRemoteAddressString() +
-                " did not send a valid status code after reading. " +
-                "Will close connection.");
+            LOG.warn("Client {} did not send a valid status code " +
+                "after reading. Will close connection.",
+                peer.getRemoteAddressString());
             IOUtils.closeStream(out);
           }
         } catch (IOException ioe) {
@@ -633,10 +629,8 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
     } catch ( SocketException ignored ) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
-            remoteAddress, ignored);
-      }
+      LOG.trace("{}:Ignoring exception while serving {} to {}",
+          dnR, block, remoteAddress, ignored);
       // Its ok for remote side to close the connection anytime.
       datanode.metrics.incrBlocksRead();
       IOUtils.closeStream(out);
@@ -645,8 +639,8 @@ class DataXceiver extends Receiver implements Runnable {
        * Earlier version shutdown() datanode if there is disk error.
        */
       if (!(ioe instanceof SocketTimeoutException)) {
-        LOG.warn(dnR + ":Got exception while serving " + block + " to "
-          + remoteAddress, ioe);
+        LOG.warn("{}:Got exception while serving {} to {}",
+            dnR, block, remoteAddress, ioe);
         incrDatanodeNetworkErrors();
       }
       throw ioe;
@@ -719,19 +713,18 @@ class DataXceiver extends Receiver implements Runnable {
       throw new IOException(stage + " does not support multiple targets "
           + Arrays.asList(targets));
     }
-    
+
     if (LOG.isDebugEnabled()) {
-      LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname 
-      		+ "\n  block  =" + block + ", newGs=" + latestGenerationStamp
-      		+ ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]"
-          + "\n  targets=" + Arrays.asList(targets)
-          + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode
-          + ", pinning=" + pinning);
-      LOG.debug("isDatanode=" + isDatanode
-          + ", isClient=" + isClient
-          + ", isTransfer=" + isTransfer);
-      LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() +
-                " tcp no delay " + peer.getTcpNoDelay());
+      LOG.debug("opWriteBlock: stage={}, clientname={}\n  " +
+              "block  ={}, newGs={}, bytesRcvd=[{}, {}]\n  " +
+              "targets={}; pipelineSize={}, srcDataNode={}, pinning={}",
+          stage, clientname, block, latestGenerationStamp, minBytesRcvd,
+          maxBytesRcvd, Arrays.asList(targets), pipelineSize, srcDataNode,
+          pinning);
+      LOG.debug("isDatanode={}, isClient={}, isTransfer={}",
+          isDatanode, isClient, isTransfer);
+      LOG.debug("writeBlock receive buf size {} tcp no delay {}",
+          peer.getReceiveBufferSize(), peer.getTcpNoDelay());
     }
 
     // We later mutate block's generation stamp and length, but we need to
@@ -741,8 +734,8 @@ class DataXceiver extends Receiver implements Runnable {
     if (block.getNumBytes() == 0) {
       block.setNumBytes(dataXceiverServer.estimateBlockSize);
     }
-    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
-        + localAddress);
+    LOG.info("Receiving {} src: {} dest: {}",
+        block, remoteAddress, localAddress);
 
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
@@ -778,9 +771,7 @@ class DataXceiver extends Receiver implements Runnable {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
         mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + mirrorNode);
-        }
+        LOG.debug("Connecting to datanode {}", mirrorNode);
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -844,11 +835,10 @@ class DataXceiver extends Receiver implements Runnable {
               BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
             mirrorInStatus = connectAck.getStatus();
             firstBadLink = connectAck.getFirstBadLink();
-            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-              LOG.debug("Datanode " + targets.length +
-                       " got response for connect ack " +
-                       " from downstream datanode with firstbadlink as " +
-                       firstBadLink);
+            if (mirrorInStatus != SUCCESS) {
+              LOG.debug("Datanode {} got response for connect" +
+                  "ack  from downstream datanode with firstbadlink as {}",
+                  targets.length, firstBadLink);
             }
           }
 
@@ -869,13 +859,12 @@ class DataXceiver extends Receiver implements Runnable {
           IOUtils.closeSocket(mirrorSock);
           mirrorSock = null;
           if (isClient) {
-            LOG.error(datanode + ":Exception transfering block " +
-                      block + " to mirror " + mirrorNode + ": " + e);
+            LOG.error("{}:Exception transfering block {} to mirror {}",
+                datanode, block, mirrorNode, e);
             throw e;
           } else {
-            LOG.info(datanode + ":Exception transfering " +
-                     block + " to mirror " + mirrorNode +
-                     "- continuing without the mirror", e);
+            LOG.info("{}:Exception transfering {} to mirror {}- continuing " +
+                "without the mirror", datanode, block, mirrorNode, e);
             incrDatanodeNetworkErrors();
           }
         }
@@ -883,10 +872,9 @@ class DataXceiver extends Receiver implements Runnable {
 
       // send connect-ack to source for clients and not transfer-RBW/Finalized
       if (isClient && !isTransfer) {
-        if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-          LOG.debug("Datanode " + targets.length +
-                   " forwarding connect ack to upstream firstbadlink is " +
-                   firstBadLink);
+        if (mirrorInStatus != SUCCESS) {
+          LOG.debug("Datanode {} forwarding connect ack to upstream " +
+              "firstbadlink is {}", targets.length, firstBadLink);
         }
         BlockOpResponseProto.newBuilder()
           .setStatus(mirrorInStatus)
@@ -904,9 +892,7 @@ class DataXceiver extends Receiver implements Runnable {
 
         // send close-ack for transfer-RBW/Finalized 
         if (isTransfer) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("TRANSFER: send close-ack");
-          }
+          LOG.trace("TRANSFER: send close-ack");
           writeResponse(SUCCESS, null, replyOut);
         }
       }
@@ -924,15 +910,16 @@ class DataXceiver extends Receiver implements Runnable {
       if (isDatanode ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
-        LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
-            + localAddress + " of size " + block.getNumBytes());
+        LOG.info("Received {} src: {} dest: {} of size {}",
+            block, remoteAddress, localAddress, block.getNumBytes());
       }
 
       if(isClient) {
         size = block.getNumBytes();
       }
     } catch (IOException ioe) {
-      LOG.info("opWriteBlock " + block + " received exception " + ioe);
+      LOG.info("opWriteBlock {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -970,7 +957,8 @@ class DataXceiver extends Receiver implements Runnable {
           targetStorageTypes, targetStorageIds, clientName);
       writeResponse(Status.SUCCESS, null, out);
     } catch (IOException ioe) {
-      LOG.info("transferBlock " + blk + " received exception " + ioe);
+      LOG.info("transferBlock {} received exception {}",
+          blk, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1005,7 +993,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + block + " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1046,8 +1035,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + stripedBlockInfo.getBlock() +
-          " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          stripedBlockInfo.getBlock(), ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1105,10 +1094,10 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
       
-      LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
+      LOG.info("Copied {} to {}", block, peer.getRemoteAddressString());
     } catch (IOException ioe) {
       isOpSuccess = false;
-      LOG.info("opCopyBlock " + block + " received exception " + ioe);
+      LOG.info("opCopyBlock {} received exception {}", block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1163,16 +1152,14 @@ class DataXceiver extends Receiver implements Runnable {
         ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
             storageType, storageId);
         if (oldReplica != null) {
-          LOG.info("Moved " + block + " from StorageType "
-              + oldReplica.getVolume().getStorageType() + " to " + storageType);
+          LOG.info("Moved {} from StorageType {} to {}",
+              block, oldReplica.getVolume().getStorageType(), storageType);
         }
       } else {
         block.setNumBytes(dataXceiverServer.estimateBlockSize);
         // get the output stream to the proxy
         final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
         proxySock = datanode.newSocket();
         NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
@@ -1229,8 +1216,8 @@ class DataXceiver extends Receiver implements Runnable {
         datanode.notifyNamenodeReceivedBlock(
             block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
         
-        LOG.info("Moved " + block + " from " + peer.getRemoteAddressString()
-            + ", delHint=" + delHint);
+        LOG.info("Moved {} from {}, delHint={}",
+            block, peer.getRemoteAddressString(), delHint);
       }
     } catch (IOException ioe) {
       opStatus = ERROR;
@@ -1260,7 +1247,8 @@ class DataXceiver extends Receiver implements Runnable {
       try {
         sendResponse(opStatus, errMsg);
       } catch (IOException ioe) {
-        LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
+        LOG.warn("Error writing reply back to {}",
+            peer.getRemoteAddressString());
         incrDatanodeNetworkErrors();
       }
       IOUtils.closeStream(proxyOut);
@@ -1408,10 +1396,8 @@ class DataXceiver extends Receiver implements Runnable {
       final String[] storageIds) throws IOException {
     checkAndWaitForBP(blk);
     if (datanode.isBlockTokenEnabled) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Checking block access token for block '" + blk.getBlockId()
-            + "' with mode '" + mode + "'");
-      }
+      LOG.debug("Checking block access token for block '{}' with mode '{}'",
+          blk.getBlockId(), mode);
       try {
         datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
             storageTypes, storageIds);
@@ -1429,9 +1415,9 @@ class DataXceiver extends Receiver implements Runnable {
             resp.build().writeDelimitedTo(out);
             out.flush();
           }
-          LOG.warn("Block token verification failed: op=" + op
-              + ", remoteAddress=" + remoteAddress
-              + ", message=" + e.getLocalizedMessage());
+          LOG.warn("Block token verification failed: op={}, " +
+                  "remoteAddress={}, message={}",
+              op, remoteAddress, e.getLocalizedMessage());
           throw e;
         } finally {
           IOUtils.closeStream(out);
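
For illustration only (not part of this patch): a sketch of the two exception-logging styles used in the DataXceiver.java hunks above. Names are made up; the calls mirror the ioe.toString()-as-argument and trailing-Throwable forms.

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExceptionLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ExceptionLoggingSketch.class);

  void onWriteFailure(String block, IOException ioe) {
    // Expected failure: pass ioe.toString() as an ordinary {} argument, so
    // only the exception text is logged, without a stack trace.
    LOG.info("opWriteBlock {} received exception {}", block, ioe.toString());
    // Unexpected failure: pass the exception itself as the last argument,
    // so SLF4J records the full stack trace.
    LOG.warn("Got exception while serving {}", block, ioe);
  }
}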

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6185,7 +6185,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         LOG.debug("Get corrupt file blocks returned error: " + e.getMessage());
       }
     } catch (IOException e) {
-      LOG.warn("Get corrupt file blocks returned error: " + e.getMessage());
+      LOG.warn("Get corrupt file blocks returned error", e);
     }
     return JSON.toString(list);
   }

+ 17 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java

@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -51,6 +49,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -60,7 +60,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  */
 @InterfaceAudience.Private
 public class StandbyCheckpointer {
-  private static final Log LOG = LogFactory.getLog(StandbyCheckpointer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StandbyCheckpointer.class);
   private static final long PREVENT_AFTER_CANCEL_MS = 2*60*1000L;
   private final CheckpointConf checkpointConf;
   private final Configuration conf;
@@ -136,8 +137,8 @@ public class StandbyCheckpointer {
 
   public void start() {
     LOG.info("Starting standby checkpoint thread...\n" +
-        "Checkpointing active NN to possible NNs: " + activeNNAddresses + "\n" +
-        "Serving checkpoints at " + myNNAddress);
+        "Checkpointing active NN to possible NNs: {}\n" +
+        "Serving checkpoints at {}", activeNNAddresses, myNNAddress);
     thread.start();
   }
   
@@ -177,8 +178,8 @@ public class StandbyCheckpointer {
       assert thisCheckpointTxId >= prevCheckpointTxId;
       if (thisCheckpointTxId == prevCheckpointTxId) {
         LOG.info("A checkpoint was triggered but the Standby Node has not " +
-            "received any transactions since the last checkpoint at txid " +
-            thisCheckpointTxId + ". Skipping...");
+            "received any transactions since the last checkpoint at txid {}. " +
+            "Skipping...", thisCheckpointTxId);
         return;
       }
 
@@ -253,8 +254,7 @@ public class StandbyCheckpointer {
         }
 
       } catch (ExecutionException e) {
-        ioe = new IOException("Exception during image upload: " + e.getMessage(),
-            e.getCause());
+        ioe = new IOException("Exception during image upload", e);
         break;
       } catch (InterruptedException e) {
         ie = e;
@@ -401,15 +401,15 @@ public class StandbyCheckpointer {
           if (needCheckpoint) {
             LOG.info("Triggering a rollback fsimage for rolling upgrade.");
           } else if (uncheckpointed >= checkpointConf.getTxnCount()) {
-            LOG.info("Triggering checkpoint because there have been " + 
-                uncheckpointed + " txns since the last checkpoint, which " +
-                "exceeds the configured threshold " +
-                checkpointConf.getTxnCount());
+            LOG.info("Triggering checkpoint because there have been {} txns " +
+                "since the last checkpoint, " +
+                "which exceeds the configured threshold {}",
+                uncheckpointed, checkpointConf.getTxnCount());
             needCheckpoint = true;
           } else if (secsSinceLast >= checkpointConf.getPeriod()) {
-            LOG.info("Triggering checkpoint because it has been " +
-                secsSinceLast + " seconds since the last checkpoint, which " +
-                "exceeds the configured interval " + checkpointConf.getPeriod());
+            LOG.info("Triggering checkpoint because it has been {} seconds " +
+                "since the last checkpoint, which exceeds the configured " +
+                "interval {}", secsSinceLast, checkpointConf.getPeriod());
             needCheckpoint = true;
           }
 
@@ -442,7 +442,7 @@ public class StandbyCheckpointer {
             LOG.info("Checkpoint finished successfully.");
           }
         } catch (SaveNamespaceCancelledException ce) {
-          LOG.info("Checkpoint was cancelled: " + ce.getMessage());
+          LOG.info("Checkpoint was cancelled: {}", ce.getMessage());
           canceledCount++;
         } catch (InterruptedException ie) {
           LOG.info("Interrupted during checkpointing", ie);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -1332,7 +1332,7 @@ public class MiniDFSCluster implements AutoCloseable {
     try {
       uri = new URI("hdfs://" + hostPort);
     } catch (URISyntaxException e) {
-      NameNode.LOG.warn("unexpected URISyntaxException: " + e );
+      NameNode.LOG.warn("unexpected URISyntaxException", e);
     }
     return uri;
   }

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java

@@ -32,13 +32,13 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class drives the creation of a mini-cluster on the local machine. By
@@ -58,8 +58,8 @@ import org.eclipse.jetty.util.ajax.JSON;
  * $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-0.24.0-SNAPSHOT-tests.jar org.apache.hadoop.test.MiniDFSClusterManager -options...
  */
 public class MiniDFSClusterManager {
-  private static final Log LOG =
-    LogFactory.getLog(MiniDFSClusterManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniDFSClusterManager.class);
 
   private MiniDFSCluster dfs;
   private String writeDetails;
@@ -146,8 +146,8 @@ public class MiniDFSClusterManager {
                                           .build();
     dfs.waitActive();
     
-    LOG.info("Started MiniDFSCluster -- namenode on port "
-        + dfs.getNameNodePort());
+    LOG.info("Started MiniDFSCluster -- namenode on port {}",
+        dfs.getNameNodePort());
 
     if (writeConfig != null) {
       FileOutputStream fos = new FileOutputStream(new File(writeConfig));
@@ -180,7 +180,7 @@ public class MiniDFSClusterManager {
       CommandLineParser parser = new GnuParser();
       cli = parser.parse(options, args);
     } catch(ParseException e) {
-      LOG.warn("options parsing failed:  "+e.getMessage());
+      LOG.warn("options parsing failed", e);
       new HelpFormatter().printHelp("...", options);
       return false;
     }
@@ -192,7 +192,7 @@ public class MiniDFSClusterManager {
     
     if (cli.getArgs().length > 0) {
       for (String arg : cli.getArgs()) {
-        LOG.error("Unrecognized option: " + arg);
+        LOG.error("Unrecognized option: {}", arg);
         new HelpFormatter().printHelp("...", options);
         return false;
       }
@@ -236,12 +236,12 @@ public class MiniDFSClusterManager {
           conf2.set(keyval[0], keyval[1]);
           num_confs_updated++;
         } else {
-          LOG.warn("Ignoring -D option " + prop);
+          LOG.warn("Ignoring -D option {}", prop);
         }
       }
     }
-    LOG.info("Updated " + num_confs_updated +
-        " configuration settings from command line.");
+    LOG.info("Updated {} configuration settings from command line.",
+        num_confs_updated);
   }
 
   /**
@@ -254,8 +254,8 @@ public class MiniDFSClusterManager {
         return Integer.parseInt(o);
       } 
     } catch (NumberFormatException ex) {
-      LOG.error("Couldn't parse value (" + o + ") for option " 
-          + argName + ". Using default: " + defaultValue);
+      LOG.error("Couldn't parse value ({}) for option {}. " +
+          "Using default: {}", o, argName, defaultValue);
     }
     
     return defaultValue;    

+ 10 - 12
hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java

@@ -45,8 +45,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.tools.rumen.JobStoryProducer;
 import org.apache.hadoop.tools.rumen.ZombieJobProducer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Driver class for the Gridmix3 benchmark. Gridmix accepts a timestamped
@@ -58,7 +58,7 @@ import org.apache.commons.logging.LogFactory;
  */
 public class Gridmix extends Configured implements Tool {
 
-  public static final Log LOG = LogFactory.getLog(Gridmix.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Gridmix.class);
 
   /**
    * Output (scratch) directory for submitted jobs. Relative paths are
@@ -184,8 +184,8 @@ public class Gridmix extends Configured implements Tool {
       final Configuration conf = getConf();
 
       if (inputDir.getFileSystem(conf).exists(inputDir)) {
-        LOG.error("Gridmix input data directory " + inputDir
-                  + " already exists when -generate option is used.\n");
+        LOG.error("Gridmix input data directory {} already exists " +
+            "when -generate option is used.", inputDir);
         return STARTUP_FAILED_ERROR;
       }
 
@@ -193,13 +193,13 @@ public class Gridmix extends Configured implements Tool {
       CompressionEmulationUtil.setupDataGeneratorConfig(conf);
     
       final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
-      LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
-               " of test data...");
+      LOG.info("Generating {} of test data...",
+          StringUtils.TraditionalBinaryPrefix.long2String(genbytes, "", 1));
       launchGridmixJob(genData);
     
       FsShell shell = new FsShell(conf);
       try {
-        LOG.info("Changing the permissions for inputPath " + inputDir.toString());
+        LOG.info("Changing the permissions for inputPath {}", inputDir);
         shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
       } catch (Exception e) {
         LOG.error("Couldnt change the file permissions " , e);
@@ -528,9 +528,7 @@ public class Gridmix extends Configured implements Tool {
         statistics.start();
       } catch (Throwable e) {
         LOG.error("Startup failed. " + e.toString() + "\n");
-        if (LOG.isDebugEnabled()) {
-          e.printStackTrace();
-        }
+        LOG.debug("Startup failed", e);
         if (factory != null) factory.abort(); // abort pipeline
         exitCode = STARTUP_FAILED_ERROR;
       } finally {
@@ -561,7 +559,7 @@ public class Gridmix extends Configured implements Tool {
         summarizer.finalize(factory, traceIn, genbytes, userResolver, stats, 
                             conf);
       }
-      IOUtils.cleanup(LOG, trace);
+      IOUtils.cleanupWithLogger(LOG, trace);
     }
     return exitCode;
   }
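
For illustration only (not part of this patch): a sketch of the Gridmix.java change above that replaces a guarded printStackTrace() with the (String, Throwable) debug overload. Names are made up.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StartupFailureSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(StartupFailureSketch.class);

  void onStartupFailure(Throwable e) {
    // Before:
    //   if (LOG.isDebugEnabled()) { e.printStackTrace(); }
    // which writes to stderr and bypasses the log configuration.
    // After: the (String, Throwable) overload routes the stack trace through
    // the logger, and the call is effectively a no-op unless DEBUG is enabled.
    LOG.debug("Startup failed", e);
  }
}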

+ 11 - 18
hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java

@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.fs.swift.http;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
 import org.apache.hadoop.fs.swift.util.SwiftUtils;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.methods.HttpRequestBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.EOFException;
@@ -44,8 +44,8 @@ import java.net.URI;
  */
 public class HttpInputStreamWithRelease extends InputStream {
 
-  private static final Log LOG =
-    LogFactory.getLog(HttpInputStreamWithRelease.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpInputStreamWithRelease.class);
   private final URI uri;
   private HttpRequestBase req;
   private HttpResponse resp;
@@ -100,9 +100,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     if (!released) {
       reasonClosed = reason;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Releasing connection to " + uri + ":  " + reason, ex);
-        }
+        LOG.debug("Releasing connection to {}:  {}", uri, reason, ex);
         if (req != null) {
           if (!dataConsumed) {
             req.abort();
@@ -137,7 +135,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       release(operation, ex);
     } catch (IOException ioe) {
-      LOG.debug("Exception during release: " + operation + " - " + ioe, ioe);
+      LOG.debug("Exception during release: {}", operation, ioe);
       //make this the exception if there was none before
       if (ex == null) {
         ex = ioe;
@@ -173,9 +171,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read();
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read()", e);
@@ -200,9 +196,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read(b, off, len);
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read(b, off, " + len + ")", e);
@@ -222,13 +216,12 @@ public class HttpInputStreamWithRelease extends InputStream {
   protected void finalize() {
     try {
       if (release("finalize()", constructionStack)) {
-        LOG.warn("input stream of " + uri
-                 + " not closed properly -cleaned up in finalize()");
+        LOG.warn("input stream of {}" +
+                 " not closed properly -cleaned up in finalize()", uri);
       }
     } catch (Exception e) {
       //swallow anything that failed here
-      LOG.warn("Exception while releasing " + uri + "in finalizer",
-               e);
+      LOG.warn("Exception while releasing {} in finalizer", uri, e);
     }
   }
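
For illustration only (not part of this patch): a sketch of why the isDebugEnabled() guards removed in HttpInputStreamWithRelease.java above are safe to drop. Names are made up; the call mirrors the release-logging line above.

import java.net.URI;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedDebugSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedDebugSketch.class);

  void release(URI uri, String reason, Exception ex) {
    // No isDebugEnabled() guard is needed: with {} placeholders the message
    // is only formatted when DEBUG is enabled. A guard remains useful only
    // when computing an argument is itself expensive.
    LOG.debug("Releasing connection to {}: {}", uri, reason, ex);
  }
}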
 

Some files were not shown because too many files changed in this diff