
HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common.

This closes #251

Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
wenxinhe 7 years ago
parent
commit
7e583a3824
100 changed files with 394 additions and 378 deletions
  1. +8 -8 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  2. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
  3. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
  4. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
  5. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
  6. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
  7. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
  8. +5 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
  9. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  10. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
  11. +5 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
  12. +5 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
  13. +5 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  14. +5 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  15. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  16. +2 -2 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
  17. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
  18. +6 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  19. +5 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  20. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
  21. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
  22. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  23. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
  24. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
  25. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
  26. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
  27. +8 -7 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  28. +5 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  29. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  30. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  31. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
  32. +3 -8 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
  33. +10 -10 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  34. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
  35. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  36. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  37. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
  38. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
  39. +3 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
  40. +2 -2 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  41. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
  42. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
  43. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  44. +3 -2 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
  45. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
  46. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
  47. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
  48. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
  49. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
  50. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
  51. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
  52. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
  53. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
  54. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
  55. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
  56. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
  57. +3 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
  58. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
  59. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
  60. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
  61. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
  62. +8 -8 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
  63. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
  64. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  65. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
  66. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
  67. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
  68. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  69. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
  70. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  71. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  72. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
  73. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
  74. +8 -8 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  75. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
  76. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  77. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
  78. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
  79. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
  80. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  81. +3 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsUtil.java
  82. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
  83. +5 -5 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
  84. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/jvm/JvmMetrics.java
  85. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
  86. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java
  87. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java
  88. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java
  89. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java
  90. +1 -1 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
  91. +4 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
  92. +5 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
  93. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
  94. +3 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  95. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
  96. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
  97. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
  98. +3 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
  99. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
  100. +4 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java

+ 8 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -77,8 +77,6 @@ import javax.xml.transform.stream.StreamResult;
 
 import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -98,6 +96,8 @@ import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerator;
 import org.codehaus.stax2.XMLInputFactory2;
 import org.codehaus.stax2.XMLStreamReader2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
@@ -183,11 +183,11 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Stable
 public class Configuration implements Iterable<Map.Entry<String,String>>,
                                       Writable {
-  private static final Log LOG =
-    LogFactory.getLog(Configuration.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Configuration.class);
 
-  private static final Log LOG_DEPRECATION =
-    LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
+  private static final Logger LOG_DEPRECATION = LoggerFactory.getLogger(
+          "org.apache.hadoop.conf.Configuration.deprecation");
 
   private boolean quietmode = true;
 
@@ -2801,10 +2801,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       }
       return null;
     } catch (IOException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     } catch (XMLStreamException e) {
-      LOG.fatal("error parsing conf " + name, e);
+      LOG.error("error parsing conf " + name, e);
       throw new RuntimeException(e);
     }
   }
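
Besides the import and declaration swap, this hunk downgrades LOG.fatal(...) to LOG.error(...): SLF4J defines no FATAL level, so ERROR is the highest severity available. The same mapping recurs in the HA classes below. The commit keeps the original string concatenation; the parameterized form in this sketch is only the idiomatic SLF4J alternative, not what was committed:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ParseFailureSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParseFailureSketch.class);

  static RuntimeException fail(String name, Exception e) {
    // A trailing Throwable argument is logged with its stack trace;
    // the {} placeholder is filled in only if ERROR is enabled.
    LOG.error("error parsing conf {}", name, e);
    return new RuntimeException(e);
  }
}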

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java

@@ -22,9 +22,10 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -41,8 +42,8 @@ import java.util.Map;
 public abstract class ReconfigurableBase 
   extends Configured implements Reconfigurable {
   
-  private static final Log LOG =
-    LogFactory.getLog(ReconfigurableBase.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurableBase.class);
   // Use for testing purpose.
   private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil();
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.conf;
 
-import org.apache.commons.logging.*;
-
 import org.apache.commons.lang.StringEscapeUtils;
 
 import java.util.Collection;
@@ -33,6 +31,8 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A servlet for changing a node's configuration.
@@ -45,8 +45,8 @@ public class ReconfigurationServlet extends HttpServlet {
   
   private static final long serialVersionUID = 1L;
 
-  private static final Log LOG =
-    LogFactory.getLog(ReconfigurationServlet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconfigurationServlet.class);
 
   // the prefix used to fing the attribute holding the reconfigurable 
   // for a given request

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java

@@ -26,12 +26,12 @@ import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
@@ -42,8 +42,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JceAesCtrCryptoCodec.class.getName());
   
   private Configuration conf;
   private String provider;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java

@@ -26,22 +26,22 @@ import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.crypto.random.OsSecureRandom;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implement the AES-CTR crypto codec using JNI into OpenSSL.
  */
 @InterfaceAudience.Private
 public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslAesCtrCryptoCodec.class.getName());
 
   private Configuration conf;
   private Random random;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java

@@ -26,12 +26,12 @@ import javax.crypto.IllegalBlockSizeException;
 import javax.crypto.NoSuchPaddingException;
 import javax.crypto.ShortBufferException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -40,8 +40,8 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public final class OpensslCipher {
-  private static final Log LOG =
-      LogFactory.getLog(OpensslCipher.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslCipher.class.getName());
   public static final int ENCRYPT_MODE = 1;
   public static final int DECRYPT_MODE = 0;
   

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java

@@ -19,12 +19,12 @@ package org.apache.hadoop.crypto.random;
 
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -43,8 +43,8 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public class OpensslSecureRandom extends Random {
   private static final long serialVersionUID = -7828193502768789584L;
-  private static final Log LOG =
-      LogFactory.getLog(OpensslSecureRandom.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpensslSecureRandom.class.getName());
   
   /** If native SecureRandom unavailable, use java SecureRandom */
   private java.security.SecureRandom fallback = null;

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java

@@ -23,12 +23,12 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
@@ -39,7 +39,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 public class OsSecureRandom extends Random implements Closeable, Configurable {
-  public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OsSecureRandom.class);
   
   private static final long serialVersionUID = 6391500337172057900L;
 
@@ -112,7 +113,7 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
   @Override
   synchronized public void close() {
     if (stream != null) {
-      IOUtils.cleanup(LOG, stream);
+      IOUtils.cleanupWithLogger(LOG, stream);
       stream = null;
     }
   }
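
The close() hunk is not just cosmetic: the old IOUtils.cleanup(...) overload takes a commons-logging Log as its first argument, so call sites that now hold an SLF4J Logger move to IOUtils.cleanupWithLogger(...). A rough sketch of what such a helper does (the real implementation lives in org.apache.hadoop.io.IOUtils and may differ):

import java.io.Closeable;
import java.io.IOException;
import org.slf4j.Logger;

final class CleanupSketch {
  private CleanupSketch() {
  }

  // Close each resource, logging rather than propagating failures,
  // so cleanup in finally blocks cannot mask the primary exception.
  static void cleanupWithLogger(Logger log, Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        if (log != null) {
          log.debug("Exception in closing {}", c, e);
        }
      }
    }
  }
}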

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -32,8 +32,6 @@ import java.util.NoSuchElementException;
 import java.util.StringTokenizer;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -52,6 +50,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class provides an interface for implementors of a Hadoop file system
@@ -66,7 +66,7 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class AbstractFileSystem {
-  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
+  static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
 
   /** Recording statistics per a file system class. */
   private static final Map<URI, Statistics> 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -27,14 +27,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Abstract Checksumed Fs.
@@ -110,8 +110,8 @@ public abstract class ChecksumFs extends FilterFs {
    * It verifies that data matches checksums.
    *******************************************************/
   private static class ChecksumFSInputChecker extends FSInputChecker {
-    public static final Log LOG 
-      = LogFactory.getLog(FSInputChecker.class);
+    public static final Logger LOG =
+        LoggerFactory.getLogger(FSInputChecker.class);
     private static final int HEADER_LENGTH = 8;
     
     private ChecksumFs fs;

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java

@@ -26,12 +26,12 @@ import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A daemon thread that waits for the next file system to renew.
@@ -39,8 +39,8 @@ import org.apache.hadoop.util.Time;
 @InterfaceAudience.Private
 public class DelegationTokenRenewer
     extends Thread {
-  private static final Log LOG = LogFactory
-      .getLog(DelegationTokenRenewer.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(DelegationTokenRenewer.class);
 
   /** The renewable interface used by the renewer. */
   public interface Renewable {
@@ -243,7 +243,7 @@ public class DelegationTokenRenewer
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
         if (LOG.isDebugEnabled()) {
-          LOG.debug(ie.getStackTrace());
+          LOG.debug("Exception in removeRenewAction: ", ie);
         }
       }
     }
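
The debug hunk also fixes a latent bug: LOG.debug(ie.getStackTrace()) compiled against commons-logging's debug(Object) but printed only the array's toString() (something like "[Ljava.lang.StackTraceElement;@1b6d3586"), and it would not compile against SLF4J's debug(String). Passing the Throwable itself logs the message plus the full stack trace. A hypothetical call site mirroring the hunk:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class StackTraceSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(StackTraceSketch.class);

  static void onInterrupt(InterruptedException ie) {
    if (LOG.isDebugEnabled()) {
      // Message first, Throwable second: SLF4J renders the trace.
      LOG.debug("Exception in removeRenewAction: ", ie);
    }
  }
}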

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java

@@ -22,11 +22,12 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.nio.ByteBuffer;
 import java.nio.IntBuffer;
 
@@ -37,8 +38,8 @@ import java.nio.IntBuffer;
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 abstract public class FSInputChecker extends FSInputStream {
-  public static final Log LOG 
-  = LogFactory.getLog(FSInputChecker.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FSInputChecker.class);
   
   /** The file name from which data is read from */
   protected Path file;

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -35,8 +35,6 @@ import java.util.Stack;
 import java.util.TreeSet;
 import java.util.Map.Entry;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -62,6 +60,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
 
 import com.google.common.base.Preconditions;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The FileContext class provides an interface to the application writer for
@@ -178,7 +178,7 @@ import org.apache.htrace.core.Tracer;
 @InterfaceStability.Stable
 public class FileContext {
   
-  public static final Log LOG = LogFactory.getLog(FileContext.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
   /**
    * Default permission for directory and symlink
    * In previous versions, this default permission was also used to
@@ -343,7 +343,7 @@ public class FileContext {
         }
       });
     } catch (InterruptedException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new IOException("Failed to get the AbstractFileSystem for path: "
           + uri, ex);
     }
@@ -457,7 +457,7 @@ public class FileContext {
     } catch (UnsupportedFileSystemException ex) {
       throw ex;
     } catch (IOException ex) {
-      LOG.error(ex);
+      LOG.error(ex.toString());
       throw new RuntimeException(ex);
     }
     return getFileContext(defaultAfs, aConf);
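
The two LOG.error(ex) call sites change because commons-logging declares error(Object) while SLF4J declares error(String): a bare exception argument no longer compiles, hence ex.toString(). Neither form prints the stack trace; a sketch contrasting the committed change with the variant that would keep it:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ErrorSignatureSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ErrorSignatureSketch.class);

  static void report(Exception ex) {
    // What the commit does: exception summary only.
    LOG.error(ex.toString());
    // Stack-preserving alternative (not what the commit does):
    LOG.error("Failed to get the AbstractFileSystem", ex);
  }
}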

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -45,8 +45,6 @@ import java.util.zip.ZipFile;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -57,6 +55,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of file-processing util methods
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 public class FileUtil {
 
-  private static final Log LOG = LogFactory.getLog(FileUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
 
   /* The error code is defined in winutils to indicate insufficient
    * privilege to create symbolic links. This value need to keep in
@@ -732,7 +732,7 @@ public class FileUtil {
         entry = tis.getNextTarEntry();
       }
     } finally {
-      IOUtils.cleanup(LOG, tis, inputStream);
+      IOUtils.cleanupWithLogger(LOG, tis, inputStream);
     }
   }
   
@@ -1317,7 +1317,7 @@ public class FileUtil {
       bos = new BufferedOutputStream(fos);
       jos = new JarOutputStream(bos, jarManifest);
     } finally {
-      IOUtils.cleanup(LOG, jos, bos, fos);
+      IOUtils.cleanupWithLogger(LOG, jos, bos, fos);
     }
     String[] jarCp = {classPathJar.getCanonicalPath(),
                         unexpandedWildcardClasspath.toString()};

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -24,8 +24,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 
 import org.apache.commons.lang.WordUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -39,12 +37,14 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provide command line access to a FileSystem. */
 @InterfaceAudience.Private
 public class FsShell extends Configured implements Tool {
   
-  static final Log LOG = LogFactory.getLog(FsShell.class);
+  static final Logger LOG = LoggerFactory.getLogger(FsShell.class);
 
   private static final int MAX_LINE_WIDTH = 80;
 

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java

@@ -22,7 +22,6 @@ import java.util.LinkedList;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.ChmodParser;
@@ -32,6 +31,7 @@ import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.fs.shell.FsCommand;
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
 
 /**
  * This class is the home for file permissions related commands.
@@ -41,7 +41,7 @@ import org.apache.hadoop.util.Shell;
 @InterfaceStability.Unstable
 public class FsShellPermissions extends FsCommand {
 
-  static Log LOG = FsShell.LOG;
+  static final Logger LOG = FsShell.LOG;
   
   /**
    * Register the permission related commands with the factory

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java

@@ -23,18 +23,19 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class Globber {
-  public static final Log LOG = LogFactory.getLog(Globber.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Globber.class.getName());
 
   private final FileSystem fs;
   private final FileContext fc;

+ 6 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.fs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.FileNotFoundException;
@@ -50,7 +50,8 @@ import java.util.*;
 
 public class HarFileSystem extends FileSystem {
 
-  private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HarFileSystem.class);
 
   public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
   public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
@@ -1173,7 +1174,7 @@ public class HarFileSystem extends FileSystem {
         LOG.warn("Encountered exception ", ioe);
         throw ioe;
       } finally {
-        IOUtils.cleanup(LOG, lin, in);
+        IOUtils.cleanupWithLogger(LOG, lin, in);
       }
 
       FSDataInputStream aIn = fs.open(archiveIndexPath);
@@ -1198,7 +1199,7 @@ public class HarFileSystem extends FileSystem {
           }
         }
       } finally {
-        IOUtils.cleanup(LOG, aIn);
+        IOUtils.cleanupWithLogger(LOG, aIn);
       }
     }
   }

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -23,14 +23,15 @@ import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration; 
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An implementation of a round-robin scheme for disk allocation for creating
  * files. The way it works is that it is kept track what disk was last
@@ -245,8 +246,8 @@ public class LocalDirAllocator {
   
   private static class AllocatorPerContext {
 
-    private final Log LOG =
-      LogFactory.getLog(AllocatorPerContext.class);
+    private static final Logger LOG =
+        LoggerFactory.getLogger(AllocatorPerContext.class);
 
     private Random dirIndexRandomizer = new Random();
     private String contextCfgItemName;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

@@ -19,11 +19,12 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** 
  * Provides a trash facility which supports pluggable Trash policies. 
@@ -34,8 +35,7 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
-  private static final org.apache.commons.logging.Log LOG =
-      LogFactory.getLog(Trash.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Trash.class);
 
   private TrashPolicy trashPolicy; // configured trash policy instance
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -30,8 +30,6 @@ import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -41,6 +39,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Provides a <i>trash</i> feature.  Files are moved to a user's trash
  * directory, a subdirectory of their home directory named ".Trash".  Files are
@@ -54,8 +54,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class TrashPolicyDefault extends TrashPolicy {
-  private static final Log LOG =
-    LogFactory.getLog(TrashPolicyDefault.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TrashPolicyDefault.class);
 
   private static final Path CURRENT = new Path("Current");
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -25,8 +25,6 @@ import java.net.URI;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.ftp.FTP;
 import org.apache.commons.net.ftp.FTPClient;
 import org.apache.commons.net.ftp.FTPFile;
@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -56,8 +56,8 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceStability.Stable
 public class FTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory
-      .getLog(FTPFileSystem.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(FTPFileSystem.class);
 
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -21,8 +21,6 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,6 +28,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A class for file/directory permissions.
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.WritableFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FsPermission implements Writable {
-  private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FsPermission.class);
 
   static final WritableFactory FACTORY = new WritableFactory() {
     @Override

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java

@@ -23,19 +23,20 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
 
 import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.JSch;
 import com.jcraft.jsch.JSchException;
 import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Concurrent/Multiple Connections. */
 class SFTPConnectionPool {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
   // Maximum number of allowed live connections. This doesn't mean we cannot
   // have more live connections. It means that when we have more
   // live connections than this threshold, any unused connection will be

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java

@@ -26,8 +26,6 @@ import java.net.URLDecoder;
 import java.util.ArrayList;
 import java.util.Vector;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,11 +39,14 @@ import com.jcraft.jsch.ChannelSftp;
 import com.jcraft.jsch.ChannelSftp.LsEntry;
 import com.jcraft.jsch.SftpATTRS;
 import com.jcraft.jsch.SftpException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** SFTP FileSystem. */
 public class SFTPFileSystem extends FileSystem {
 
-  public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SFTPFileSystem.class);
 
   private SFTPConnectionPool connectionPool;
   private URI uri;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -26,8 +26,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -35,6 +33,8 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An abstract class for the execution of a file system command
@@ -58,7 +58,7 @@ abstract public class Command extends Configured {
   private int depth = 0;
   protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
 
-  private static final Log LOG = LogFactory.getLog(Command.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Command.class);
 
   /** allows stdout to be captured if necessary */
   public PrintStream out = System.out;

+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -47,6 +45,8 @@ import org.apache.zookeeper.KeeperException.Code;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -141,7 +141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   @VisibleForTesting
   protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
 
-  public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ActiveStandbyElector.class);
 
   private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
 
@@ -712,7 +713,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   }
 
   private void fatalError(String errorMessage) {
-    LOG.fatal(errorMessage);
+    LOG.error(errorMessage);
     reset();
     appClient.notifyFatalError(errorMessage);
   }
@@ -824,10 +825,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         createConnection();
         success = true;
       } catch(IOException e) {
-        LOG.warn(e);
+        LOG.warn(e.toString());
         sleepFor(5000);
       } catch(KeeperException e) {
-        LOG.warn(e);
+        LOG.warn(e.toString());
         sleepFor(5000);
       }
       ++connectionRetryCount;
@@ -866,7 +867,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     try {
       tempZk.close();
     } catch(InterruptedException e) {
-      LOG.warn(e);
+      LOG.warn(e.toString());
     }
     zkConnectionState = ConnectionState.TERMINATED;
     wantToBeInElection = false;

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -19,9 +19,6 @@ package org.apache.hadoop.ha;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -32,6 +29,8 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ipc.RPC;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The FailOverController is responsible for electing an active service
@@ -43,7 +42,8 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class FailoverController {
 
-  private static final Log LOG = LogFactory.getLog(FailoverController.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FailoverController.class);
 
   private final int gracefulFenceTimeout;
   private final int rpcTimeoutToNewActive;
@@ -252,7 +252,7 @@ public class FailoverController {
         } catch (FailoverFailedException ffe) {
           msg += ". Failback to " + fromSvc +
             " failed (" + ffe.getMessage() + ")";
-          LOG.fatal(msg);
+          LOG.error(msg);
         }
       }
       throw new FailoverFailedException(msg, cause);

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -28,8 +28,6 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -43,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A command-line tool for making calls in the HAServiceProtocol.
@@ -62,7 +62,7 @@ public abstract class HAAdmin extends Configured implements Tool {
    * operation, which is why it is not documented in the usage below.
    */
   private static final String FORCEMANUAL = "forcemanual";
-  private static final Log LOG = LogFactory.getLog(HAAdmin.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
 
   private int rpcTimeoutForChecks = -1;
   
@@ -449,7 +449,7 @@ public abstract class HAAdmin extends Configured implements Tool {
     
     if (cmdLine.hasOption(FORCEMANUAL)) {
       if (!confirmForceManual()) {
-        LOG.fatal("Aborted");
+        LOG.error("Aborted");
         return -1;
       }
       // Instruct the NNs to honor this request even if they're

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -23,8 +23,6 @@ import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -35,6 +33,8 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.util.Daemon;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a daemon which runs in a loop, periodically heartbeating
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class HealthMonitor {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HealthMonitor.class);
 
   private Daemon daemon;
@@ -283,7 +283,7 @@ public class HealthMonitor {
       setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
         @Override
         public void uncaughtException(Thread t, Throwable e) {
-          LOG.fatal("Health monitor failed", e);
+          LOG.error("Health monitor failed", e);
           enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
         }
       });

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java

@@ -22,8 +22,6 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class parses the configured list of fencing methods, and
@@ -61,7 +61,7 @@ public class NodeFencer {
   private static final Pattern HASH_COMMENT_RE =
     Pattern.compile("#.*$");
 
-  private static final Log LOG = LogFactory.getLog(NodeFencer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NodeFencer.class);
 
   /**
    * Standard fencing methods included with Hadoop.

+ 3 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

@@ -23,8 +23,6 @@ import java.util.Collection;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -272,7 +270,7 @@ public class SshFenceByTcpPort extends Configured
    * Adapter from JSch's logger interface to our log4j
    */
   private static class LogAdapter implements com.jcraft.jsch.Logger {
-    static final Log LOG = LogFactory.getLog(
+    static final Logger LOG = LoggerFactory.getLogger(
         SshFenceByTcpPort.class.getName() + ".jsch");
 
     @Override
@@ -285,9 +283,8 @@ public class SshFenceByTcpPort extends Configured
       case com.jcraft.jsch.Logger.WARN:
         return LOG.isWarnEnabled();
       case com.jcraft.jsch.Logger.ERROR:
-        return LOG.isErrorEnabled();
       case com.jcraft.jsch.Logger.FATAL:
-        return LOG.isFatalEnabled();
+        return LOG.isErrorEnabled();
       default:
         return false;
       }
@@ -306,10 +303,8 @@ public class SshFenceByTcpPort extends Configured
         LOG.warn(message);
         break;
       case com.jcraft.jsch.Logger.ERROR:
-        LOG.error(message);
-        break;
       case com.jcraft.jsch.Logger.FATAL:
-        LOG.fatal(message);
+        LOG.error(message);
         break;
       default:
         break;
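
This is one of the few files with a net deletion: the JSch LogAdapter previously gave JSch's ERROR and FATAL levels separate branches, and with no FATAL in SLF4J the two cases now fall through to a single error() branch. A condensed, self-contained sketch of the resulting adapter shape (class and logger names are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class JschToSlf4jAdapter implements com.jcraft.jsch.Logger {
  private static final Logger LOG =
      LoggerFactory.getLogger(JschToSlf4jAdapter.class);

  @Override
  public boolean isEnabled(int level) {
    switch (level) {
    case com.jcraft.jsch.Logger.DEBUG:
      return LOG.isDebugEnabled();
    case com.jcraft.jsch.Logger.INFO:
      return LOG.isInfoEnabled();
    case com.jcraft.jsch.Logger.WARN:
      return LOG.isWarnEnabled();
    case com.jcraft.jsch.Logger.ERROR:
    case com.jcraft.jsch.Logger.FATAL:
      // SLF4J tops out at ERROR, so FATAL shares its branch.
      return LOG.isErrorEnabled();
    default:
      return false;
    }
  }

  @Override
  public void log(int level, String message) {
    switch (level) {
    case com.jcraft.jsch.Logger.DEBUG:
      LOG.debug(message); break;
    case com.jcraft.jsch.Logger.INFO:
      LOG.info(message); break;
    case com.jcraft.jsch.Logger.WARN:
      LOG.warn(message); break;
    case com.jcraft.jsch.Logger.ERROR:
    case com.jcraft.jsch.Logger.FATAL:
      LOG.error(message); break;
    default:
      break;
    }
  }
}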

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -27,8 +27,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -56,11 +54,13 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.LimitedPrivate("HDFS")
 public abstract class ZKFailoverController {
 
-  static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
+  static final Logger LOG = LoggerFactory.getLogger(ZKFailoverController.class);
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
@@ -161,7 +161,7 @@ public abstract class ZKFailoverController {
 
   public int run(final String[] args) throws Exception {
     if (!localTarget.isAutoFailoverEnabled()) {
-      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+      LOG.error("Automatic failover is not enabled for " + localTarget + "." +
           " Please ensure that automatic failover is enabled in the " +
           "configuration before running the ZK failover controller.");
       return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
@@ -183,7 +183,7 @@ public abstract class ZKFailoverController {
         }
       });
     } catch (RuntimeException rte) {
-      LOG.fatal("The failover controller encounters runtime error: " + rte);
+      LOG.error("The failover controller encounters runtime error: " + rte);
       throw (Exception)rte.getCause();
     }
   }
@@ -194,7 +194,7 @@ public abstract class ZKFailoverController {
     try {
       initZK();
     } catch (KeeperException ke) {
-      LOG.fatal("Unable to start failover controller. Unable to connect "
+      LOG.error("Unable to start failover controller. Unable to connect "
           + "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
           + "configured value for " + ZK_QUORUM_KEY + " and ensure that "
           + "ZooKeeper is running.");
@@ -220,7 +220,7 @@ public abstract class ZKFailoverController {
     }
 
     if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. "
+      LOG.error("Unable to start failover controller. "
           + "Parent znode does not exist.\n"
           + "Run with -formatZK flag to initialize ZooKeeper.");
       return ERR_CODE_NO_PARENT_ZNODE;
@@ -229,7 +229,7 @@ public abstract class ZKFailoverController {
     try {
       localTarget.checkFencingConfigured();
     } catch (BadFencingConfigurationException e) {
-      LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
+      LOG.error("Fencing is not configured for " + localTarget + ".\n" +
           "You must configure a fencing method before using automatic " +
           "failover.", e);
       return ERR_CODE_NO_FENCER;
@@ -375,7 +375,7 @@ public abstract class ZKFailoverController {
   }
   
   private synchronized void fatalError(String err) {
-    LOG.fatal("Fatal error occurred:" + err);
+    LOG.error("Fatal error occurred:" + err);
     fatalError = err;
     notifyAll();
   }
@@ -394,7 +394,7 @@ public abstract class ZKFailoverController {
 
     } catch (Throwable t) {
       String msg = "Couldn't make " + localTarget + " active";
-      LOG.fatal(msg, t);
+      LOG.error(msg, t);
       
       recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
           StringUtils.stringifyException(t)));

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.ha.protocolPB;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -42,6 +40,8 @@ import org.apache.hadoop.ipc.RPC;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is used on the server side. Calls come across the wire for the
@@ -61,7 +61,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
       TransitionToActiveResponseProto.newBuilder().build();
   private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
       TransitionToStandbyResponseProto.newBuilder().build();
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       HAServiceProtocolServerSideTranslatorPB.class);
   
   public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
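
Most files in this patch follow the two-step shape shown above: drop the commons-logging imports, add the slf4j ones, and re-type the LOG field. The declarations change, but call sites elsewhere in the patch keep string concatenation; slf4j's {} placeholders are the idiomatic follow-up, since they defer message construction until the level is enabled. A minimal sketch, with the class and message invented for illustration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(PlaceholderSketch.class);

      public static void main(String[] args) {
        String target = "nn1";
        // Concatenation builds the message even when the level is disabled.
        LOG.error("Automatic failover is not enabled for " + target + ".");
        // Placeholder form formats lazily; no isErrorEnabled() guard needed.
        LOG.error("Automatic failover is not enabled for {}.", target);
      }
    }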

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -44,8 +44,6 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
@@ -82,6 +80,8 @@ import org.mortbay.thread.QueuedThreadPool;
 import org.mortbay.util.MultiException;
 
 import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal
@@ -100,7 +100,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
 @InterfaceStability.Evolving
 @Deprecated
 public class HttpServer implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HttpServer.class);
 
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -46,8 +46,6 @@ import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
 import com.google.common.collect.ImmutableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -96,6 +94,9 @@ import org.mortbay.util.MultiException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 
@@ -112,7 +113,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_US
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public final class HttpServer2 implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
 
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java

@@ -29,11 +29,11 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.Filter;
 
@@ -47,7 +47,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_S
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
 
-  private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StaticUserWebFilter.class);
 
   static class User implements Principal {
     private final String name;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java

@@ -22,8 +22,6 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.util.bloom.DynamicBloomFilter;
 import org.apache.hadoop.util.bloom.Filter;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
@@ -52,7 +52,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOO
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class BloomMapFile {
-  private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BloomMapFile.class);
   public static final String BLOOM_FILE_NAME = "bloom";
   public static final int HASH_COUNT = 5;
   

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java

@@ -22,11 +22,10 @@ import java.nio.ByteOrder;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import com.google.common.primitives.Longs;
 import com.google.common.primitives.UnsignedBytes;
 
@@ -36,7 +35,7 @@ import com.google.common.primitives.UnsignedBytes;
  * class to be able to compare arrays that start at non-zero offsets.
  */
 abstract class FastByteComparisons {
-  static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
+  static final Logger LOG = LoggerFactory.getLogger(FastByteComparisons.class);
 
   /**
    * Lexicographically compare two byte arrays.

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -32,13 +32,13 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -49,7 +49,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class IOUtils {
-  public static final Log LOG = LogFactory.getLog(IOUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
 
   /**
    * Copies from one stream to another.

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java

@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +35,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@@ -60,7 +60,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SK
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MapFile {
-  private static final Log LOG = LogFactory.getLog(MapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
 
   /** The name of the index file. */
   public static final String INDEX_FILE_NAME = "index";
@@ -1002,7 +1002,7 @@ public class MapFile {
       while (reader.next(key, value))               // copy all entries
         writer.append(key, value);
     } finally {
-      IOUtils.cleanup(LOG, writer, reader);
+      IOUtils.cleanupWithLogger(LOG, writer, reader);
     }
   }
 }
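
IOUtils.cleanup takes a commons-logging Log, so call sites moving to slf4j switch to the cleanupWithLogger overload, as above. A minimal sketch of the behavior being relied on, reconstructed here for illustration rather than copied from Hadoop's IOUtils:

    import java.io.Closeable;
    import java.io.IOException;
    import org.slf4j.Logger;

    public class CleanupSketch {
      // Close each stream, logging rather than propagating any IOException.
      static void cleanupWithLogger(Logger log, Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c == null) {
            continue;
          }
          try {
            c.close();
          } catch (IOException e) {
            if (log != null) {
              log.debug("Exception in closing {}", c, e);
            }
          }
        }
      }
    }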

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java

@@ -23,8 +23,6 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -33,6 +31,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
 
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manages a pool of threads which can issue readahead requests on file descriptors.
@@ -40,7 +40,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ReadaheadPool {
-  static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
+  static final Logger LOG = LoggerFactory.getLogger(ReadaheadPool.class);
   private static final int POOL_SIZE = 4;
   private static final int MAX_POOL_SIZE = 16;
   private static final int CAPACITY = 1024;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -24,7 +24,6 @@ import java.util.*;
 import java.rmi.server.UID;
 import java.security.MessageDigest;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.Options.CreateOpts;
@@ -50,6 +49,8 @@ import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -202,7 +203,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class SequenceFile {
-  private static final Log LOG = LogFactory.getLog(SequenceFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SequenceFile.class);
 
   private SequenceFile() {}                         // no public ctor
 
@@ -1893,7 +1894,7 @@ public class SequenceFile {
         succeeded = true;
       } finally {
         if (!succeeded) {
-          IOUtils.cleanup(LOG, this.in);
+          IOUtils.cleanupWithLogger(LOG, this.in);
         }
       }
     }

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java

@@ -25,9 +25,10 @@ import java.io.UTFDataFormatException;
 
 import org.apache.hadoop.util.StringUtils;
 
-import org.apache.commons.logging.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** A WritableComparable for strings that uses the UTF8 encoding.
  * 
@@ -42,7 +43,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Stable
 public class UTF8 implements WritableComparable<UTF8> {
-  private static final Log LOG= LogFactory.getLog(UTF8.class);
+  private static final Logger LOG= LoggerFactory.getLogger(UTF8.class);
   private static final DataInputBuffer IBUF = new DataInputBuffer();
 
   private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java

@@ -23,8 +23,6 @@ import java.util.Set;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A global compressor/decompressor pool used to save and reuse 
@@ -41,7 +41,7 @@ import com.google.common.cache.LoadingCache;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class CodecPool {
-  private static final Log LOG = LogFactory.getLog(CodecPool.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CodecPool.class);
   
   /**
    * A global compressor pool used to save the expensive 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.io.compress;
 
 import java.util.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A factory that will find the correct codec for a given filename.
@@ -36,8 +36,8 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceStability.Evolving
 public class CompressionCodecFactory {
 
-  public static final Log LOG =
-    LogFactory.getLog(CompressionCodecFactory.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CompressionCodecFactory.class.getName());
   
   private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
     ServiceLoader.load(CompressionCodec.class);

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -22,14 +22,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -37,7 +37,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
-  private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DefaultCodec.class);
   
   Configuration conf;
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java

@@ -24,9 +24,8 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular 
@@ -42,7 +41,8 @@ public class Bzip2Compressor implements Compressor {
   static final int DEFAULT_BLOCK_SIZE = 9;
   static final int DEFAULT_WORK_FACTOR = 30;
 
-  private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Compressor.class);
 
   private long stream;
   private int blockSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java

@@ -23,9 +23,8 @@ import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.io.compress.Decompressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the popular 
@@ -36,7 +35,8 @@ import org.apache.commons.logging.LogFactory;
 public class Bzip2Decompressor implements Decompressor {
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
   
-  private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Bzip2Decompressor.class);
 
   private long stream;
   private boolean conserveMemory;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.io.compress.bzip2;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.NativeCodeLoader;
@@ -30,6 +28,8 @@ import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
 import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
 import org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor;
 import org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right 
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor;
  * 
  */
 public class Bzip2Factory {
-  private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Bzip2Factory.class);
 
   private static String bzip2LibraryName = "";
   private static boolean nativeBzip2Loaded;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Compressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java

@@ -22,18 +22,18 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the lz4 compression algorithm.
  * http://code.google.com/p/lz4/
  */
 public class Lz4Decompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(Lz4Compressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;
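
One carry-over worth noting: the migrated declaration still names Lz4Compressor.class.getName() inside Lz4Decompressor, preserving what the old commons-logging line did. A per-class logger would normally read as below; this is a hypothetical correction, not part of the commit:

    private static final Logger LOG =
        LoggerFactory.getLogger(Lz4Decompressor.class.getName());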

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyCompressor implements Compressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyCompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyCompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java

@@ -22,19 +22,19 @@ import java.io.IOException;
 import java.nio.Buffer;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DirectDecompressor;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Decompressor} based on the snappy compression algorithm.
  * http://code.google.com/p/snappy/
  */
 public class SnappyDecompressor implements Decompressor {
-  private static final Log LOG =
-      LogFactory.getLog(SnappyDecompressor.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SnappyDecompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
   private int directBufferSize;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java

@@ -23,9 +23,8 @@ import java.util.zip.Deflater;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A wrapper around java.util.zip.Deflater to make it conform 
@@ -34,7 +33,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class BuiltInZlibDeflater extends Deflater implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BuiltInZlibDeflater.class);
 
   public BuiltInZlibDeflater(int level, boolean nowrap) {
     super(level, nowrap);

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java

@@ -25,9 +25,8 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Compressor} based on the popular 
@@ -37,7 +36,8 @@ import org.apache.commons.logging.LogFactory;
  */
 public class ZlibCompressor implements Compressor {
 
-  private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZlibCompressor.class);
 
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
 

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.io.compress.zlib;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
@@ -28,6 +26,8 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of factories to create the right 
@@ -35,8 +35,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  * 
  */
 public class ZlibFactory {
-  private static final Log LOG =
-    LogFactory.getLog(ZlibFactory.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ZlibFactory.class);
 
   private static boolean nativeZlibLoaded = false;
   

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java

@@ -30,8 +30,6 @@ import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -43,6 +41,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
 import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Block Compressed file, the underlying physical storage layer for TFile.
@@ -54,7 +54,7 @@ final class BCFile {
   // the current version of BCFile impl, increment them (major or minor) made
   // enough changes
   static final Version API_VERSION = new Version((short) 1, (short) 0);
-  static final Log LOG = LogFactory.getLog(BCFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(BCFile.class);
 
   /**
    * Prevent the instantiation of BCFile objects.

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java

@@ -24,8 +24,6 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -35,6 +33,8 @@ import org.apache.hadoop.io.compress.Compressor;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
@@ -44,7 +44,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * Compression related stuff.
  */
 final class Compression {
-  static final Log LOG = LogFactory.getLog(Compression.class);
+  static final Logger LOG = LoggerFactory.getLogger(Compression.class);
 
   /**
    * Prevent the instantiation of class.

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java

@@ -29,8 +29,6 @@ import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Comparator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +49,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.BytesComparator;
 import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
 import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A TFile is a container of key-value pairs. Both keys and values are type-less
@@ -131,7 +131,7 @@ import org.apache.hadoop.io.serializer.JavaSerializationComparator;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class TFile {
-  static final Log LOG = LogFactory.getLog(TFile.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFile.class);
 
   private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
   private static final String FS_INPUT_BUF_SIZE_ATTR =
@@ -335,7 +335,7 @@ public class TFile {
           writerBCF.close();
         }
       } finally {
-        IOUtils.cleanup(LOG, blkAppender, writerBCF);
+        IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
         blkAppender = null;
         writerBCF = null;
         state = State.CLOSED;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java

@@ -25,8 +25,6 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,12 +34,14 @@ import org.apache.hadoop.io.file.tfile.BCFile.BlockRegion;
 import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
 import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
 import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Dumping the information of a TFile.
  */
 class TFileDumper {
-  static final Log LOG = LogFactory.getLog(TFileDumper.class);
+  static final Logger LOG = LoggerFactory.getLogger(TFileDumper.class);
 
   private TFileDumper() {
     // namespace object not constructable.
@@ -290,7 +290,7 @@ class TFileDumper {
         }
       }
     } finally {
-      IOUtils.cleanup(LOG, reader, fsdis);
+      IOUtils.cleanupWithLogger(LOG, reader, fsdis);
     }
   }
 }

+ 8 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -40,9 +40,9 @@ import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.PerformanceAdvisory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import sun.misc.Unsafe;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -98,7 +98,7 @@ public class NativeIO {
        write.  */
     public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
 
-    private static final Log LOG = LogFactory.getLog(NativeIO.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
     // Set to true via JNI if possible
     public static boolean fadvisePossible = false;
@@ -634,7 +634,7 @@ public class NativeIO {
     }
   }
 
-  private static final Log LOG = LogFactory.getLog(NativeIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
   private static boolean nativeLoaded = false;
 
@@ -940,10 +940,10 @@ public class NativeIO {
           position += transferred;
         }
       } finally {
-        IOUtils.cleanup(LOG, output);
-        IOUtils.cleanup(LOG, fos);
-        IOUtils.cleanup(LOG, input);
-        IOUtils.cleanup(LOG, fis);
+        IOUtils.cleanupWithLogger(LOG, output);
+        IOUtils.cleanupWithLogger(LOG, fos);
+        IOUtils.cleanupWithLogger(LOG, input);
+        IOUtils.cleanupWithLogger(LOG, fis);
       }
     }
   }
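
Since cleanupWithLogger is a varargs method, the four consecutive calls above could also collapse into one. A sketch, assuming the relative close order of the four streams is not significant here:

    IOUtils.cleanupWithLogger(LOG, output, fos, input, fis);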

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java

@@ -22,10 +22,10 @@ import java.io.IOException;
 import java.io.FileDescriptor;
 
 import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A factory for creating shared file descriptors inside a given directory.
@@ -45,7 +45,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class SharedFileDescriptorFactory {
-  public static final Log LOG = LogFactory.getLog(SharedFileDescriptorFactory.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SharedFileDescriptorFactory.class);
   private final String prefix;
   private final String path;
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -32,8 +32,6 @@ import java.util.Map.Entry;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -41,6 +39,8 @@ import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 public class RetryPolicies {
   
-  public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
+  public static final Logger LOG = LoggerFactory.getLogger(RetryPolicies.class);
   
   /**
    * <p>

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java

@@ -19,17 +19,17 @@ package org.apache.hadoop.io.retry;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
 import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.ipc.RetriableException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class RetryUtils {
-  public static final Log LOG = LogFactory.getLog(RetryUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(RetryUtils.class);
   
   /**
    * Return the default retry policy set in conf.

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.io.serializer;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
 import org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -41,8 +41,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 @InterfaceStability.Evolving
 public class SerializationFactory extends Configured {
   
-  private static final Log LOG =
-    LogFactory.getLog(SerializationFactory.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SerializationFactory.class.getName());
 
   private List<Serialization<?>> serializations = new ArrayList<Serialization<?>>();
   

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java

@@ -28,20 +28,21 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Abstracts queue operations for different blocking queues.
  */
 public class CallQueueManager<E extends Schedulable>
     extends AbstractQueue<E> implements BlockingQueue<E> {
-  public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CallQueueManager.class);
   // Number of checkpoints for empty queue.
   private static final int CHECKPOINT_NUM = 20;
   // Interval to check empty queue.

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -57,6 +55,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.net.SocketFactory;
 import javax.security.sasl.Sasl;
@@ -84,7 +84,7 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 @InterfaceStability.Evolving
 public class Client implements AutoCloseable {
   
-  public static final Log LOG = LogFactory.getLog(Client.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Client.class);
 
   /** A counter for generating call IDs. */
   private static final AtomicInteger callIdCounter = new AtomicInteger();

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java

@@ -33,11 +33,11 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.NotImplementedException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A queue with multiple levels for each priority.
@@ -50,7 +50,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
     "faircallqueue.priority-levels";
 
-  public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
+  public static final Logger LOG = LoggerFactory.getLogger(FairCallQueue.class);
 
   /* The queues */
   private final ArrayList<BlockingQueue<E>> queues;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.*;
 import com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -39,6 +37,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.net.SocketFactory;
 import java.io.IOException;
@@ -55,7 +55,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
  */
 @InterfaceStability.Evolving
 public class ProtobufRpcEngine implements RpcEngine {
-  public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ProtobufRpcEngine.class);
   private static final ThreadLocal<AsyncGet<Message, Exception>>
       ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -37,8 +37,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.*;
@@ -59,6 +57,8 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
 import com.google.protobuf.BlockingService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** A simple RPC mechanism.
  *
@@ -109,7 +109,7 @@ public class RPC {
         Writable rpcRequest, long receiveTime) throws Exception ;
   }
   
-  static final Log LOG = LogFactory.getLog(RPC.class);
+  static final Logger LOG = LoggerFactory.getLogger(RPC.class);
   
   /**
    * Get all superInterfaces that extend VersionedProtocol

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java

@@ -24,9 +24,9 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Used to registry custom methods to refresh at runtime.
@@ -34,7 +34,8 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceStability.Unstable
 public class RefreshRegistry {
-  public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(RefreshRegistry.class);
 
   // Used to hold singleton instance
   private static class RegistryHolder {

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java

@@ -22,8 +22,6 @@ import java.util.Arrays;
 import java.util.UUID;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
 import org.apache.hadoop.util.LightWeightCache;
@@ -32,6 +30,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Maintains a cache of non-idempotent requests that have been successfully
@@ -44,7 +44,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class RetryCache {
-  public static final Log LOG = LogFactory.getLog(RetryCache.class);
+  public static final Logger LOG = LoggerFactory.getLogger(RetryCache.class);
   private final RetryCacheMetrics retryCacheMetrics;
   private static final int MAX_CAPACITY = 16;
 

+ 8 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -69,8 +69,6 @@ import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -124,6 +122,8 @@ import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedOutputStream;
 import com.google.protobuf.Message;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An abstract IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
@@ -292,9 +292,9 @@ public abstract class Server {
   }
   
 
-  public static final Log LOG = LogFactory.getLog(Server.class);
-  public static final Log AUDITLOG = 
-    LogFactory.getLog("SecurityLogger."+Server.class.getName());
+  public static final Logger LOG = LoggerFactory.getLogger(Server.class);
+  public static final Logger AUDITLOG =
+      LoggerFactory.getLogger("SecurityLogger."+Server.class.getName());
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
   private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
   
@@ -1112,7 +1112,7 @@ public abstract class Server {
           } catch (IOException ex) {
             LOG.error("Error in Reader", ex);
           } catch (Throwable re) {
-            LOG.fatal("Bug in read selector!", re);
+            LOG.error("Bug in read selector!", re);
             ExitUtil.terminate(1, "Bug in read selector!");
           }
         }
@@ -2620,7 +2620,7 @@ public abstract class Server {
           }
         } finally {
           CurCall.set(null);
-          IOUtils.cleanup(LOG, traceScope);
+          IOUtils.cleanupWithLogger(LOG, traceScope);
         }
       }
       LOG.debug(Thread.currentThread().getName() + ": exiting");
@@ -2629,7 +2629,7 @@ public abstract class Server {
   }
 
   @VisibleForTesting
-  void logException(Log logger, Throwable e, Call call) {
+  void logException(Logger logger, Throwable e, Call call) {
     if (exceptionsHandler.isSuppressedLog(e.getClass())) {
       return; // Log nothing.
     }
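
This hunk shows two migration patterns at once: a logger looked up by name rather than by class (the "SecurityLogger." audit logger), and a method signature whose parameter type moves from Log to Logger. A minimal sketch of both, with the class and call names invented for illustration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ServerLoggingSketch {
      static final Logger LOG =
          LoggerFactory.getLogger(ServerLoggingSketch.class);
      // Named lookup: any backend configuration keyed on the
      // "SecurityLogger.*" prefix keeps working after the migration.
      static final Logger AUDITLOG = LoggerFactory.getLogger(
          "SecurityLogger." + ServerLoggingSketch.class.getName());

      // Callers may now pass any slf4j Logger, including AUDITLOG.
      static void logException(Logger logger, Throwable e, String call) {
        logger.info("Exception while handling {}", call, e);
      }

      public static void main(String[] args) {
        logException(AUDITLOG, new RuntimeException("demo"), "getServiceStatus");
      }
    }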

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java

@@ -20,9 +20,9 @@ package org.apache.hadoop.ipc;
 
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Determines which queue to start reading from, occasionally drawing from
@@ -43,8 +43,8 @@ public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
   public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
     "faircallqueue.multiplexer.weights";
 
-  public static final Log LOG =
-    LogFactory.getLog(WeightedRoundRobinMultiplexer.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(WeightedRoundRobinMultiplexer.class);
 
   private final int numQueues; // The number of queues under our provisioning
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -28,8 +28,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
@@ -43,11 +41,13 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
 public class WritableRpcEngine implements RpcEngine {
-  private static final Log LOG = LogFactory.getLog(RPC.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RPC.class);
   
   //writableRpcVersion should be updated if there is a change
   //in format of the rpc messages.

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.ipc.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RetryCache;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -26,6 +24,8 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is for maintaining the various RetryCache-related statistics
@@ -35,7 +35,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 @Metrics(about="Aggregate RetryCache metrics", context="rpc")
 public class RetryCacheMetrics {
 
-  static final Log LOG = LogFactory.getLog(RetryCacheMetrics.class);
+  static final Logger LOG = LoggerFactory.getLogger(RetryCacheMetrics.class);
   final MetricsRegistry registry;
   final String name;
 

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.ipc.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is for maintaining RPC method related statistics
@@ -37,7 +37,7 @@ public class RpcDetailedMetrics {
   @Metric MutableRatesWithAggregation rates;
   @Metric MutableRatesWithAggregation deferredRpcRates;
 
-  static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
+  static final Logger LOG = LoggerFactory.getLogger(RpcDetailedMetrics.class);
   final MetricsRegistry registry;
   final String name;
 
@@ -45,7 +45,7 @@ public class RpcDetailedMetrics {
     name = "RpcDetailedActivityForPort"+ port;
     registry = new MetricsRegistry("rpcdetailed")
         .tag("port", "RPC port", String.valueOf(port));
-    LOG.debug(registry.info());
+    LOG.debug(registry.info().toString());
   }
 
   public String name() { return name; }
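
The explicit toString() above is needed because commons-logging accepted any Object while slf4j's debug() takes a String. The placeholder form below is an equivalent that also skips the toString() work when DEBUG is disabled, assuming registry.info() is side-effect free:

    LOG.debug("{}", registry.info());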

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.ipc.metrics;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +29,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is for maintaining  the various RPC statistics
@@ -40,7 +40,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
 @Metrics(about="Aggregate RPC metrics", context="rpc")
 public class RpcMetrics {
 
-  static final Log LOG = LogFactory.getLog(RpcMetrics.class);
+  static final Logger LOG = LoggerFactory.getLogger(RpcMetrics.class);
   final Server server;
   final MetricsRegistry registry;
   final String name;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -17,11 +17,11 @@
 
 package org.apache.hadoop.jmx;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.http.HttpServer2;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.management.AttributeNotFoundException;
 import javax.management.InstanceNotFoundException;
@@ -116,7 +116,8 @@ import java.util.Set;
  *
  */
 public class JMXJsonServlet extends HttpServlet {
-  private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JMXJsonServlet.class);
   static final String ACCESS_CONTROL_ALLOW_METHODS =
       "Access-Control-Allow-Methods";
   static final String ACCESS_CONTROL_ALLOW_ORIGIN =

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsUtil.java

@@ -20,10 +20,10 @@ package org.apache.hadoop.metrics;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class to simplify creation and reporting of hadoop metrics.
@@ -39,8 +39,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Evolving
 public class MetricsUtil {
     
-  public static final Log LOG =
-    LogFactory.getLog(MetricsUtil.class);
+  public static final Logger LOG = LoggerFactory.getLogger(MetricsUtil.class);
 
   /**
    * Don't allow creation of a new instance of Metrics

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java

@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.io.Charsets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,6 +32,8 @@ import org.apache.hadoop.metrics.ContextFactory;
 import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
 import org.apache.hadoop.metrics.spi.OutputRecord;
 import org.apache.hadoop.metrics.spi.Util;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Context for sending metrics to Ganglia.
@@ -63,7 +63,7 @@ public class GangliaContext extends AbstractMetricsContext {
   private static final int BUFFER_SIZE = 1500;       // as per libgmond.c
   private static final int DEFAULT_MULTICAST_TTL = 1;
 
-  private final Log LOG = LogFactory.getLog(this.getClass());    
+  private final Logger LOG = LoggerFactory.getLogger(this.getClass());
 
   private static final Map<Class,String> typeTable = new HashMap<Class,String>(5);
     
@@ -126,7 +126,7 @@ public class GangliaContext extends AbstractMetricsContext {
         datagramSocket = new DatagramSocket();
       }
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.toString());
     }
   }
 

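The LOG.error(e) -> LOG.error(e.toString()) edit above is forced by the API, not a style choice: commons-logging declares error(Object) and accepts a bare Throwable, while every slf4j error(...) overload starts with a String message. A sketch of the literal port next to the idiomatic alternative that keeps the stack trace (class and method names are hypothetical):

```java
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExceptionLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ExceptionLoggingExample.class);

  void handle(IOException e) {
    // Literal port, as in the GangliaContext hunk. Compiles, but logs
    // only e.toString() and discards the stack trace.
    LOG.error(e.toString());

    // Idiomatic slf4j: a Throwable passed as the last argument is
    // logged with its full stack trace.
    LOG.error("Could not create datagram socket", e);
  }
}
```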
+ 5 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java

@@ -23,11 +23,11 @@ import java.net.DatagramPacket;
 import java.net.SocketAddress;
 import java.net.UnknownHostException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics.ContextFactory;
 import org.apache.hadoop.net.DNS;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Context for sending metrics to Ganglia version 3.1.x.
@@ -42,8 +42,8 @@ public class GangliaContext31 extends GangliaContext {
 
   String hostName = "UNKNOWN.example.com";
 
-  private static final Log LOG = 
-    LogFactory.getLog("org.apache.hadoop.util.GangliaContext31");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("org.apache.hadoop.util.GangliaContext31");
 
   public void init(String contextName, ContextFactory factory) {
     super.init(contextName, factory);
@@ -62,7 +62,7 @@ public class GangliaContext31 extends GangliaContext {
           conf.get("dfs.datanode.dns.interface","default"),
           conf.get("dfs.datanode.dns.nameserver","default"));
       } catch (UnknownHostException uhe) {
-        LOG.error(uhe);
+        LOG.error(uhe.toString());
     	hostName = "UNKNOWN.example.com";
       }
     }

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/jvm/JvmMetrics.java

@@ -29,12 +29,12 @@ import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static java.lang.Thread.State.*;
 import java.lang.management.GarbageCollectorMXBean;
 import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 /**
  * Singleton class which reports Java Virtual Machine metrics to the metrics API.  
@@ -50,7 +50,7 @@ public class JvmMetrics implements Updater {
     
     private static final float M = 1024*1024;
     private static JvmMetrics theInstance = null;
-    private static Log log = LogFactory.getLog(JvmMetrics.class);
+    private static Logger log = LoggerFactory.getLogger(JvmMetrics.class);
     
     private MetricsRecord metrics;
     

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java

@@ -23,9 +23,6 @@ import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics.ContextFactory;
@@ -33,6 +30,8 @@ import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * @deprecated Use org.apache.hadoop.metrics2 package instead.
@@ -42,7 +41,8 @@ import org.apache.hadoop.metrics.Updater;
 @InterfaceStability.Evolving
 public class CompositeContext extends AbstractMetricsContext {
 
-  private static final Log LOG = LogFactory.getLog(CompositeContext.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CompositeContext.class);
   private static final String ARITY_LABEL = "arity";
   private static final String SUB_FMT = "%s.sub%d";
   private final ArrayList<MetricsContext> subctxt =

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsIntValue.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.metrics.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The MetricsIntValue class is for a metric that is not time varied
@@ -34,8 +34,8 @@ import org.apache.commons.logging.LogFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 public class MetricsIntValue extends MetricsBase {  
 
-  private static final Log LOG =
-    LogFactory.getLog("org.apache.hadoop.metrics.util");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("org.apache.hadoop.metrics.util");
 
   private int value;
   private boolean changed;

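LoggerFactory.getLogger also accepts an explicit String name, and the hunks above keep it: MetricsIntValue and the MetricsTimeVarying* classes that follow all log under the single category "org.apache.hadoop.metrics.util" instead of their own class names, so one logging-configuration entry governs the whole group. A sketch of the string-named form (the class name is hypothetical):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SharedCategoryExample {
  // Several classes can declare the same string-named logger and share
  // one logging category, as the metrics util classes above do.
  private static final Logger LOG =
      LoggerFactory.getLogger("org.apache.hadoop.metrics.util");

  void markChanged(int value) {
    LOG.info("value changed to {}", value);
  }
}
```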
+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.metrics.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The MetricsTimeVaryingInt class is for a metric that naturally
@@ -37,8 +37,8 @@ import org.apache.commons.logging.LogFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 public class MetricsTimeVaryingInt extends MetricsBase {
 
-  private static final Log LOG =
-    LogFactory.getLog("org.apache.hadoop.metrics.util");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("org.apache.hadoop.metrics.util");
   
   private int currentValue;
   private int previousIntervalValue;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.metrics.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The MetricsTimeVaryingLong class is for a metric that naturally
@@ -38,8 +38,8 @@ import org.apache.commons.logging.LogFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 public class MetricsTimeVaryingLong extends MetricsBase{
 
-  private static final Log LOG =
-    LogFactory.getLog("org.apache.hadoop.metrics.util");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("org.apache.hadoop.metrics.util");
  
   private long currentValue;
   private long previousIntervalValue;

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.metrics.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The MetricsTimeVaryingRate class is for a rate based metric that
@@ -36,8 +36,8 @@ import org.apache.commons.logging.LogFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 public class MetricsTimeVaryingRate extends MetricsBase {
 
-  private static final Log LOG =
-    LogFactory.getLog("org.apache.hadoop.metrics.util");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("org.apache.hadoop.metrics.util");
 
   static class Metrics {
     int numOperations = 0;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java

@@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
       }
       ++curRecNo;
     }
-    MetricsSystemImpl.LOG.debug(attrs);
+    MetricsSystemImpl.LOG.debug(attrs.toString());
     MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
     return new MBeanInfo(name, description, attrs.toArray(attrsArray),
                          null, null, null); // no ops/ctors/notifications

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java

@@ -39,18 +39,18 @@ import org.apache.commons.configuration.Configuration;
 import org.apache.commons.configuration.ConfigurationException;
 import org.apache.commons.configuration.PropertiesConfiguration;
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsPlugin;
 import org.apache.hadoop.metrics2.filter.GlobFilter;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Metrics configuration for MetricsSystemImpl
  */
 class MetricsConfig extends SubsetConfiguration {
-  static final Log LOG = LogFactory.getLog(MetricsConfig.class);
+  static final Logger LOG = LoggerFactory.getLogger(MetricsConfig.class);
 
   static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
   static final String PREFIX_DEFAULT = "*.";
@@ -115,7 +115,7 @@ class MetricsConfig extends SubsetConfiguration {
         LOG.info("loaded properties from "+ fname);
         LOG.debug(toString(cf));
         MetricsConfig mc = new MetricsConfig(cf, prefix);
-        LOG.debug(mc);
+        LOG.debug(mc.toString());
         return mc;
       }
       catch (ConfigurationException e) {

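LOG.debug(mc) fails to compile against slf4j for the same reason as error(e) earlier, hence the toString() calls added in the MBeanInfoBuilder and MetricsConfig hunks above. One trade-off is worth noting: the explicit toString() is evaluated even when DEBUG is disabled, whereas slf4j's parameterized form defers it until the message is actually rendered (sketch; names are hypothetical):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(DebugLoggingExample.class);

  void dump(Object config) {
    // Literal port, as in the hunks above: config.toString() runs
    // unconditionally, whether or not DEBUG is enabled.
    LOG.debug(config.toString());

    // Deferred form: toString() is only invoked if DEBUG is enabled.
    LOG.debug("{}", config);
  }
}
```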
+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java

@@ -24,8 +24,6 @@ import java.util.concurrent.*;
 
 import static com.google.common.base.Preconditions.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -36,13 +34,16 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
 import org.apache.hadoop.metrics2.MetricsFilter;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An adapter class for metrics sink and associated filters
  */
 class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
 
-  private final Log LOG = LogFactory.getLog(MetricsSinkAdapter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MetricsSinkAdapter.class);
   private final String name, description, context;
   private final MetricsSink sink;
   private final MetricsFilter sourceFilter, recordFilter, metricFilter;
@@ -210,7 +211,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
     stopping = true;
     sinkThread.interrupt();
     if (sink instanceof Closeable) {
-      IOUtils.cleanup(LOG, (Closeable)sink);
+      IOUtils.cleanupWithLogger(LOG, (Closeable)sink);
     }
     try {
       sinkThread.join();

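The MetricsSinkAdapter hunk does two things at once: the logger becomes static final (the conventional per-class form; it was previously a per-instance field), and IOUtils.cleanup(Log, Closeable...), which is typed against the commons-logging Log interface, is replaced by the slf4j-typed IOUtils.cleanupWithLogger(Logger, Closeable...). Both helpers close each argument and log, rather than propagate, anything close() throws. A sketch of the call site (everything except the IOUtils and slf4j APIs is hypothetical):

```java
import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SinkShutdownExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(SinkShutdownExample.class);

  void stop(Object sink) {
    if (sink instanceof Closeable) {
      // Closes the sink; any exception from close() is logged via the
      // slf4j Logger instead of being rethrown.
      IOUtils.cleanupWithLogger(LOG, (Closeable) sink);
    }
  }
}
```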
+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java

@@ -33,8 +33,6 @@ import static com.google.common.base.Preconditions.*;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsFilter;
@@ -43,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
 import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.metrics2.util.Contracts.*;
 
@@ -51,7 +51,8 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
  */
 class MetricsSourceAdapter implements DynamicMBean {
 
-  private static final Log LOG = LogFactory.getLog(MetricsSourceAdapter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MetricsSourceAdapter.class);
 
   private final String prefix, name;
   private final MetricsSource source;

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -36,8 +36,6 @@ import com.google.common.annotations.VisibleForTesting;
 import static com.google.common.base.Preconditions.*;
 
 import org.apache.commons.configuration.PropertiesConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.math3.util.ArithmeticUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsInfo;
@@ -62,6 +60,8 @@ import org.apache.hadoop.metrics2.lib.MutableStat;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A base class for metrics system singletons
@@ -70,7 +70,7 @@ import org.apache.hadoop.util.Time;
 @Metrics(context="metricssystem")
 public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 
-  static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
+  static final Logger LOG = LoggerFactory.getLogger(MetricsSystemImpl.class);
   static final String MS_NAME = "MetricsSystem";
   static final String MS_STATS_NAME = MS_NAME +",sub=Stats";
   static final String MS_STATS_DESC = "Metrics system metrics";

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java

@@ -22,20 +22,21 @@ import java.lang.reflect.Method;
 
 import static com.google.common.base.Preconditions.*;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.annotation.Metric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import static org.apache.hadoop.metrics2.util.Contracts.*;
 
 /**
  * Metric generated from a method, mostly used by annotation
  */
 class MethodMetric extends MutableMetric {
-  private static final Log LOG = LogFactory.getLog(MethodMetric.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MethodMetric.class);
 
   private final Object obj;
   private final Method method;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java

@@ -24,8 +24,6 @@ import java.lang.reflect.Method;
 
 import static com.google.common.base.Preconditions.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsException;
@@ -34,13 +32,16 @@ import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to build metrics source object from annotations
  */
 @InterfaceAudience.Private
 public class MetricsSourceBuilder {
-  private static final Log LOG = LogFactory.getLog(MetricsSourceBuilder.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MetricsSourceBuilder.class);
 
   private final Object source;
   private final MutableMetricsFactory factory;

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java

@@ -22,19 +22,20 @@ import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class MutableMetricsFactory {
-  private static final Log LOG = LogFactory.getLog(MutableMetricsFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MutableMetricsFactory.class);
 
   MutableMetric newForField(Field field, Metric annotation,
                             MetricsRegistry registry) {

+ 3 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java

@@ -24,12 +24,11 @@ import java.util.Set;
 import static com.google.common.base.Preconditions.*;
 import com.google.common.collect.Sets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to manage a group of mutable rate metrics
@@ -43,7 +42,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MutableRates extends MutableMetric {
-  static final Log LOG = LogFactory.getLog(MutableRates.class);
+  static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
   private final MetricsRegistry registry;
   private final Set<Class<?>> protocolCache = Sets.newHashSet();
 

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java

@@ -27,12 +27,12 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.SampleStat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -48,7 +48,8 @@ import org.apache.hadoop.metrics2.util.SampleStat;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MutableRatesWithAggregation extends MutableMetric {
-  static final Log LOG = LogFactory.getLog(MutableRatesWithAggregation.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(MutableRatesWithAggregation.class);
   private final Map<String, MutableRate> globalMetrics =
       new ConcurrentHashMap<>();
   private final Set<Class<?>> protocolCache = Sets.newHashSet();

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java

@@ -19,8 +19,6 @@
 package org.apache.hadoop.metrics2.sink;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -28,6 +26,8 @@ import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -42,7 +42,8 @@ import java.nio.charset.StandardCharsets;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class GraphiteSink implements MetricsSink, Closeable {
-    private static final Log LOG = LogFactory.getLog(GraphiteSink.class);
+    private static final Logger LOG =
+        LoggerFactory.getLogger(GraphiteSink.class);
     private static final String SERVER_HOST_KEY = "server_host";
     private static final String SERVER_PORT_KEY = "server_port";
     private static final String METRICS_PREFIX = "metrics_prefix";

Some files were not shown because too many files changed in this diff