HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging.

Andrew Wang, 9 years ago
Commit 2e7b7e2cda

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -791,6 +791,8 @@ Release 2.8.0 - UNRELEASED
    HDFS-8883. NameNode Metrics : Add FSNameSystem lock Queue Length.
    (Anu Engineer via xyao)

+    HDFS-8713. Convert DatanodeDescriptor to use SLF4J logging. (wang)
+
  OPTIMIZATIONS

    HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

+ 21 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -33,8 +33,6 @@ import java.util.Set;
 import com.google.common.annotations.VisibleForTesting;

 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -50,6 +48,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
@@ -59,7 +59,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDescriptor extends DatanodeInfo {
-  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeDescriptor.class);
   public static final DatanodeDescriptor[] EMPTY_ARRAY = {};

   // Stores status of decommissioning.
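The conversion pattern in this hunk is the one applied throughout the file: replace the commons-logging Log/LogFactory pair with SLF4J's Logger/LoggerFactory, and move message construction from string concatenation to {} placeholders. A minimal self-contained sketch of the idiom (the Example class and its report() method are hypothetical, not part of the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Example {
      // Same idiom as the patch: one static SLF4J logger per class.
      private static final Logger LOG =
          LoggerFactory.getLogger(Example.class);

      void report(String storageId, long reportId) {
        // The message is formatted only if INFO is enabled, so no
        // throwaway string is built when the line is filtered out.
        LOG.info("storage {} sent block report id 0x{}",
            storageId, Long.toHexString(reportId));
      }
    }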
@@ -319,9 +320,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
         Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
         DatanodeStorageInfo storageInfo = entry.getValue();
         if (storageInfo.getLastBlockReportId() != curBlockReportId) {
-          LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
-              Long.toHexString(storageInfo.getLastBlockReportId()) +
-              ", but curBlockReportId = 0x" +
+          LOG.info("{} had lastBlockReportId 0x{} but curBlockReportId = 0x{}",
+              storageInfo.getStorageID(),
+              Long.toHexString(storageInfo.getLastBlockReportId()),
               Long.toHexString(curBlockReportId));
           iter.remove();
           if (zombies == null) {
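A small point that explains why Long.toHexString survives the conversion above: SLF4J renders each {} with the argument's toString() (decimal for a boxed long), so any non-default formatting still has to be done by the caller. A sketch (reportId is a hypothetical local):

    // {} renders the argument via toString(), i.e. decimal for a long:
    LOG.info("report id {}", reportId);
    // so hexadecimal output still needs an explicit conversion:
    LOG.info("report id 0x{}", Long.toHexString(reportId));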
@@ -446,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }

     if (checkFailedStorages) {
-      LOG.info("Number of failed storage changes from "
-          + this.volumeFailures + " to " + volFailures);
+      if (this.volumeFailures != volFailures) {
+        LOG.info("Number of failed storages changes from {} to {}",
+            this.volumeFailures, volFailures);
+      }
       synchronized (storageMap) {
         failedStorageInfos =
             new HashSet<>(storageMap.values());
@@ -498,10 +501,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   private void pruneStorageMap(final StorageReport[] reports) {
     synchronized (storageMap) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of storages reported in heartbeat=" + reports.length
-            + "; Number of storages in storageMap=" + storageMap.size());
-      }
+      LOG.debug("Number of storages reported in heartbeat={};"
+              + " Number of storages in storageMap={}", reports.length,
+          storageMap.size());

       HashMap<String, DatanodeStorageInfo> excessStorages;

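This hunk also drops the LOG.isDebugEnabled() guard. With parameterized messages that is safe: SLF4J checks the level before formatting, so the only work done at a disabled level is the method call itself. A guard remains worthwhile when computing an argument is expensive. An illustration in the patch's own terms (reports and storageMap come from this class; buildExpensiveSummary() is a hypothetical stand-in):

    // Before: guard required, because concatenation builds the string eagerly.
    if (LOG.isDebugEnabled()) {
      LOG.debug("heartbeat storages=" + reports.length
          + "; map storages=" + storageMap.size());
    }

    // After: {} defers formatting, so the guard is redundant.
    LOG.debug("heartbeat storages={}; map storages={}",
        reports.length, storageMap.size());

    // Keep a guard only when producing the argument itself is costly:
    if (LOG.isDebugEnabled()) {
      LOG.debug("summary: {}", buildExpensiveSummary());
    }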
@@ -518,11 +520,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
       for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
         if (storageInfo.numBlocks() == 0) {
           storageMap.remove(storageInfo.getStorageID());
-          LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
-        } else if (LOG.isDebugEnabled()) {
+          LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
+        } else {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo
-              + " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage {} with {} blocks",
+              storageInfo, storageInfo.numBlocks());
         }
       }
     }
@@ -532,7 +534,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       Set<DatanodeStorageInfo> failedStorageInfos) {
     for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
       if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
-        LOG.info(storageInfo + " failed.");
+        LOG.info("{} failed.", storageInfo);
         storageInfo.setState(DatanodeStorage.State.FAILED);
       }
     }
@@ -857,8 +859,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
     synchronized (storageMap) {
       DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
       if (storage == null) {
-        LOG.info("Adding new storage ID " + s.getStorageID() +
-                 " for DN " + getXferAddr());
+        LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
+            getXferAddr());
         storage = new DatanodeStorageInfo(this, s);
         storageMap.put(s.getStorageID(), storage);
       } else if (storage.getState() != s.getState() ||