
HADOOP-18206 Cleanup the commons-logging references and restrict its usage in future (#5315)

Viraj Jasani 2 years ago
commit 90de1ff151
71 changed files with 223 additions and 532 deletions
  1. + 0 - 1    LICENSE-binary
  2. + 0 - 5    hadoop-common-project/hadoop-common/pom.xml
  3. + 0 - 25   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  4. + 7 - 33   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
  5. + 3 - 3    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
  6. + 0 - 78   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
  7. + 7 - 6    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
  8. + 10 - 14  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java
  9. + 6 - 22   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  10. + 4 - 5   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
  11. + 4 - 4   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java
  12. + 0 - 4   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java
  13. + 0 - 4   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
  14. + 1 - 5   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
  15. + 7 - 64  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
  16. + 2 - 2   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
  17. + 2 - 2   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
  18. + 0 - 5   hadoop-common-project/hadoop-nfs/pom.xml
  19. + 0 - 4   hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  20. + 0 - 5   hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
  21. + 0 - 4   hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
  22. + 3 - 3   hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
  23. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
  24. + 0 - 5   hadoop-hdfs-project/hadoop-hdfs/pom.xml
  25. + 7 - 20  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
  26. + 5 - 6   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  27. + 4 - 5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  28. + 6 - 8   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  29. + 4 - 3   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java
  30. + 12 - 16 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  31. + 6 - 14  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  32. + 4 - 12  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
  33. + 3 - 6   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  34. + 3 - 3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
  35. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
  36. + 3 - 3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java
  37. + 3 - 7   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
  38. + 3 - 2   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
  39. + 4 - 4   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
  40. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
  41. + 5 - 6   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
  42. + 2 - 3   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  43. + 3 - 7   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
  44. + 1 - 1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
  45. + 5 - 5   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java
  46. + 3 - 4   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java
  47. + 0 - 5   hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
  48. + 0 - 4   hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
  49. + 0 - 6   hadoop-project/pom.xml
  50. + 0 - 5   hadoop-tools/hadoop-archive-logs/pom.xml
  51. + 3 - 3   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
  52. + 3 - 4   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
  53. + 3 - 3   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
  54. + 3 - 4   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
  55. + 0 - 4   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
  56. + 4 - 3   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java
  57. + 1 - 5   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java
  58. + 0 - 4   hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
  59. + 3 - 3   hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
  60. + 6 - 6   hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
  61. + 3 - 3   hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
  62. + 3 - 4   hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java
  63. + 3 - 3   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java
  64. + 3 - 3   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java
  65. + 3 - 4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java
  66. + 3 - 4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java
  67. + 4 - 3   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java
  68. + 3 - 4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
  69. + 4 - 4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java
  70. + 16 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
  71. + 7 - 0   pom.xml

+ 0 - 1
LICENSE-binary

@@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11
 commons-collections:commons-collections:3.2.2
 commons-daemon:commons-daemon:1.0.13
 commons-io:commons-io:2.8.0
-commons-logging:commons-logging:1.1.3
 commons-net:commons-net:3.9.0
 de.ruedigermoeller:fst:2.50
 io.grpc:grpc-api:1.26.0

+ 0 - 5
hadoop-common-project/hadoop-common/pom.xml

@@ -180,11 +180,6 @@
       <artifactId>jersey-server</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>

+ 0 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -32,7 +32,6 @@ import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -246,30 +245,6 @@ public class IOUtils {
     }
   }
   
-  /**
-   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
-   * null pointers. Must only be used for cleanup in exception handlers.
-   *
-   * @param log the log to record problems to at debug level. Can be null.
-   * @param closeables the objects to close
-   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
-   * instead
-   */
-  @Deprecated
-  public static void cleanup(Log log, java.io.Closeable... closeables) {
-    for (java.io.Closeable c : closeables) {
-      if (c != null) {
-        try {
-          c.close();
-        } catch(Throwable e) {
-          if (log != null && log.isDebugEnabled()) {
-            log.debug("Exception in closing " + c, e);
-          }
-        }
-      }
-    }
-  }
-
   /**
    * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
    * null pointers. Must only be used for cleanup in exception handlers.
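
With the commons-logging cleanup(Log, ...) overload gone, callers move to the slf4j variant that the deleted javadoc already pointed at. A minimal usage sketch (class and stream names are illustrative, not from this commit):

    import java.io.InputStream;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CleanupSketch {
      private static final Logger LOG = LoggerFactory.getLogger(CleanupSketch.class);

      static void closeInFinally(InputStream dataIn, InputStream checksumIn) {
        // Same contract as the removed method: ignore any Throwable, logging
        // failures at DEBUG; intended only for cleanup in exception handlers.
        IOUtils.cleanupWithLogger(LOG, dataIn, checksumIn);
      }
    }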

+ 7 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -34,10 +34,6 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Jdk14Logger;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -51,6 +47,8 @@ import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 
 /**
  * Change log level in runtime.
@@ -340,22 +338,14 @@ public class LogLevel {
         out.println(MARKER
             + "Submitted Class Name: <b>" + logName + "</b><br />");
 
-        Log log = LogFactory.getLog(logName);
+        Logger log = Logger.getLogger(logName);
         out.println(MARKER
             + "Log Class: <b>" + log.getClass().getName() +"</b><br />");
         if (level != null) {
           out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
         }
 
-        if (log instanceof Log4JLogger) {
-          process(((Log4JLogger)log).getLogger(), level, out);
-        }
-        else if (log instanceof Jdk14Logger) {
-          process(((Jdk14Logger)log).getLogger(), level, out);
-        }
-        else {
-          out.println("Sorry, " + log.getClass() + " not supported.<br />");
-        }
+        process(log, level, out);
       }
 
       out.println(FORMS);
@@ -371,14 +361,14 @@ public class LogLevel {
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
 
-    private static void process(org.apache.log4j.Logger log, String level,
+    private static void process(Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level)
+        if (!level.equalsIgnoreCase(Level.toLevel(level)
             .toString())) {
           out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
         } else {
-          log.setLevel(org.apache.log4j.Level.toLevel(level));
+          log.setLevel(Level.toLevel(level));
           out.println(MARKER + "Setting Level to " + level + " ...<br />");
         }
       }
@@ -386,21 +376,5 @@ public class LogLevel {
           + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
     }
 
-    private static void process(java.util.logging.Logger log, String level,
-        PrintWriter out) throws IOException {
-      if (level != null) {
-        String levelToUpperCase = level.toUpperCase();
-        try {
-          log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
-        } catch (IllegalArgumentException e) {
-          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
-        }
-        out.println(MARKER + "Setting Level to " + level + " ...<br />");
-      }
-
-      java.util.logging.Level lev;
-      for(; (lev = log.getLevel()) == null; log = log.getParent());
-      out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
-    }
   }
 }
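
Since the servlet now assumes a log4j 1.x backend rather than probing the commons-logging wrapper type, the flow reduces to direct log4j calls. A rough standalone equivalent of the new code path (logger name and level are illustrative):

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    class SetLevelSketch {
      public static void main(String[] args) {
        Logger log = Logger.getLogger("org.apache.hadoop.example");
        String level = "DEBUG";
        // Level.toLevel() silently falls back to DEBUG on unparseable input,
        // hence the round-trip check the servlet performs before applying it.
        if (level.equalsIgnoreCase(Level.toLevel(level).toString())) {
          log.setLevel(Level.toLevel(level));
        }
        System.out.println("Effective Level: " + log.getEffectiveLevel());
      }
    }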

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.service;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
@@ -75,9 +74,10 @@ public final class ServiceOperations {
    * @param log the log to warn at
    * @param service a service; may be null
    * @return any exception that was caught; null if none was.
-   * @see ServiceOperations#stopQuietly(Service)
+   * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
    */
-  public static Exception stopQuietly(Log log, Service service) {
+  @Deprecated
+  public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
     try {
       stop(service);
     } catch (Exception e) {
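
The commons-logging overload survives only as a deprecated shim; new code passes an slf4j Logger, per the javadoc link in the hunk above. A usage sketch (the service variable and log message are illustrative):

    import org.apache.hadoop.service.Service;
    import org.apache.hadoop.service.ServiceOperations;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class StopSketch {
      private static final Logger LOG = LoggerFactory.getLogger(StopSketch.class);

      static void shutdown(Service service) {
        // Stops the service, logging rather than propagating any failure.
        Exception failure = ServiceOperations.stopQuietly(LOG, service);
        if (failure != null) {
          LOG.info("stop of {} raised {}", service.getName(), failure.toString());
        }
      }
    }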

+ 0 - 78
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java

@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.util;
-
-import org.apache.commons.logging.Log;
-import org.slf4j.Logger;
-
-class LogAdapter {
-  private Log LOG;
-  private Logger LOGGER;
-
-  private LogAdapter(Log LOG) {
-    this.LOG = LOG;
-  }
-
-  private LogAdapter(Logger LOGGER) {
-    this.LOGGER = LOGGER;
-  }
-
-  /**
-   * @deprecated use {@link #create(Logger)} instead
-   */
-  @Deprecated
-  public static LogAdapter create(Log LOG) {
-    return new LogAdapter(LOG);
-  }
-
-  public static LogAdapter create(Logger LOGGER) {
-    return new LogAdapter(LOGGER);
-  }
-
-  public void info(String msg) {
-    if (LOG != null) {
-      LOG.info(msg);
-    } else if (LOGGER != null) {
-      LOGGER.info(msg);
-    }
-  }
-
-  public void warn(String msg, Throwable t) {
-    if (LOG != null) {
-      LOG.warn(msg, t);
-    } else if (LOGGER != null) {
-      LOGGER.warn(msg, t);
-    }
-  }
-
-  public void debug(Throwable t) {
-    if (LOG != null) {
-      LOG.debug(t);
-    } else if (LOGGER != null) {
-      LOGGER.debug("", t);
-    }
-  }
-
-  public void error(String msg) {
-    if (LOG != null) {
-      LOG.error(msg);
-    } else if (LOGGER != null) {
-      LOGGER.error(msg);
-    }
-  }
-}

+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java

@@ -36,7 +36,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
@@ -222,16 +221,18 @@ public class ReflectionUtils {
   }
     
   private static long previousLogTime = 0;
-    
+
   /**
    * Log the current thread stacks at INFO level.
    * @param log the logger that logs the stack trace
    * @param title a descriptive title for the call stacks
-   * @param minInterval the minimum time from the last 
+   * @param minInterval the minimum time from the last
+   * @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead.
    */
-  public static void logThreadInfo(Log log,
-                                   String title,
-                                   long minInterval) {
+  @Deprecated
+  public static void logThreadInfo(org.apache.commons.logging.Log log,
+      String title,
+      long minInterval) {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
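
As with stopQuietly above, the Log overload is kept only as a deprecated bridge and the slf4j overload named in the javadoc is the target. A sketch (title and interval value are illustrative):

    import org.apache.hadoop.util.ReflectionUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ThreadDumpSketch {
      private static final Logger LOG = LoggerFactory.getLogger(ThreadDumpSketch.class);

      static void dump() {
        // Logs all thread stacks at INFO, rate-limited so dumps occur no more
        // often than the given minimum interval.
        ReflectionUtils.logThreadInfo(LOG, "Shutdown requested", 1);
      }
    }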

+ 10 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java

@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.util;
 
+import org.slf4j.Logger;
 import sun.misc.Signal;
 import sun.misc.SignalHandler;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -42,11 +42,11 @@ public enum SignalLogger {
    * Our signal handler.
    */
   private static class Handler implements SignalHandler {
-    final private LogAdapter LOG;
+    final private Logger log;
     final private SignalHandler prevHandler;
 
-    Handler(String name, LogAdapter LOG) {
-      this.LOG = LOG;
+    Handler(String name, Logger log) {
+      this.log = log;
       prevHandler = Signal.handle(new Signal(name), this);
     }
 
@@ -57,7 +57,7 @@ public enum SignalLogger {
      */
     @Override
     public void handle(Signal signal) {
-      LOG.error("RECEIVED SIGNAL " + signal.getNumber() +
+      log.error("RECEIVED SIGNAL " + signal.getNumber() +
           ": SIG" + signal.getName());
       prevHandler.handle(signal);
     }
@@ -66,13 +66,9 @@ public enum SignalLogger {
   /**
    * Register some signal handlers.
    *
-   * @param LOG        The log4j logfile to use in the signal handlers.
+   * @param log The log4j logfile to use in the signal handlers.
    */
-  public void register(final Log LOG) {
-    register(LogAdapter.create(LOG));
-  }
-
-  void register(final LogAdapter LOG) {
+  public void register(final Logger log) {
     if (registered) {
       throw new IllegalStateException("Can't re-install the signal handlers.");
     }
@@ -83,15 +79,15 @@ public enum SignalLogger {
     String separator = "";
     for (String signalName : SIGNALS) {
       try {
-        new Handler(signalName, LOG);
+        new Handler(signalName, log);
         bld.append(separator)
             .append(signalName);
         separator = ", ";
       } catch (Exception e) {
-        LOG.debug(e);
+        log.debug("Error: ", e);
       }
     }
     bld.append("]");
-    LOG.info(bld.toString());
+    log.info(bld.toString());
   }
 }
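
With LogAdapter deleted, register() takes an slf4j Logger directly (the TestSignalLogger diff below exercises exactly this). A minimal sketch:

    import org.apache.hadoop.util.SignalLogger;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DaemonSketch {
      private static final Logger LOG = LoggerFactory.getLogger(DaemonSketch.class);

      public static void main(String[] args) throws InterruptedException {
        // Installs handlers that log each received signal and then chain to
        // the previous handler; a second register() call throws
        // IllegalStateException.
        SignalLogger.INSTANCE.register(LOG);
        Thread.sleep(Long.MAX_VALUE);
      }
    }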

+ 6 - 22
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -740,42 +740,26 @@ public class StringUtils {
    * Print a log message for starting up and shutting down
    * @param clazz the class of the server
    * @param args arguments
-   * @param LOG the target log object
+   * @param log the target log object
    */
   public static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final org.apache.commons.logging.Log LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  /**
-   * Print a log message for starting up and shutting down
-   * @param clazz the class of the server
-   * @param args arguments
-   * @param LOG the target log object
-   */
-  public static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final org.slf4j.Logger LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  static void startupShutdownMessage(Class<?> clazz, String[] args,
-                                     final LogAdapter LOG) { 
+                                     final org.slf4j.Logger log) {
     final String hostname = NetUtils.getHostname();
     final String classname = clazz.getSimpleName();
-    LOG.info(createStartupShutdownMessage(classname, hostname, args));
+    log.info(createStartupShutdownMessage(classname, hostname, args));
 
     if (SystemUtils.IS_OS_UNIX) {
       try {
-        SignalLogger.INSTANCE.register(LOG);
+        SignalLogger.INSTANCE.register(log);
       } catch (Throwable t) {
-        LOG.warn("failed to register any UNIX signal loggers: ", t);
+        log.warn("failed to register any UNIX signal loggers: ", t);
       }
     }
     ShutdownHookManager.get().addShutdownHook(
       new Runnable() {
         @Override
         public void run() {
-          LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
+          log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
             "Shutting down " + classname + " at " + hostname}));
           LogManager.shutdown();
         }

+ 4 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java

@@ -25,8 +25,6 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,7 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * 
@@ -51,8 +50,8 @@ import org.junit.Test;
  */
 
 public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
-  private static final Log LOG =
-      LogFactory.getLog(TestViewFileSystemLocalFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class);
 
   @Override
   @Before

+ 4 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java

@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -35,6 +33,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -43,8 +43,8 @@ import org.junit.Test;
  */
 public class TestViewFileSystemOverloadSchemeLocalFileSystem {
   private static final String FILE = "file";
-  private static final Log LOG =
-      LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
   private FileSystem fsTarget;
   private Configuration conf;
   private Path targetTestRoot;

+ 0 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.http;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.minikdc.MiniKdc;
@@ -53,8 +51,6 @@ import static org.junit.Assert.assertTrue;
  */
 public class TestHttpServerWithSpnego {
 
-  static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class);
-
   private static final String SECRET_STR = "secret";
   private static final String HTTP_USER = "HTTP";
   private static final String PREFIX = "hadoop.http.authentication.";

+ 0 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java

@@ -22,8 +22,6 @@ import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.node.ContainerNode;
 import org.junit.Test;
 import static org.junit.Assert.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Category;
@@ -44,8 +42,6 @@ import java.util.Vector;
 
 public class TestLog4Json {
 
-  private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
-
   @Test
   public void testConstruction() throws Throwable {
     Log4Json l4j = new Log4Json();

+ 1 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java

@@ -22,9 +22,6 @@ import java.net.SocketException;
 import java.net.URI;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
   private final String logName = TestLogLevel.class.getName();
   private String clientPrincipal;
   private String serverPrincipal;
-  private final Log testlog = LogFactory.getLog(logName);
-  private final Logger log = ((Log4JLogger)testlog).getLogger();
+  private final Logger log = Logger.getLogger(logName);
   private final static String PRINCIPAL = "loglevel.principal";
   private final static String KEYTAB  = "loglevel.keytab";
   private static final String PREFIX = "hadoop.http.authentication.";

+ 7 - 64
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java

@@ -49,8 +49,6 @@ import java.util.function.Supplier;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -117,29 +115,11 @@ public abstract class GenericTestUtils {
   public static final String ERROR_INVALID_ARGUMENT =
       "Total wait time should be greater than check interval time";
 
-  /**
-   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
-   */
-  @Deprecated
-  @SuppressWarnings("unchecked")
-  public static void disableLog(Log log) {
-    // We expect that commons-logging is a wrapper around Log4j.
-    disableLog((Log4JLogger) log);
-  }
-
   @Deprecated
   public static Logger toLog4j(org.slf4j.Logger logger) {
     return LogManager.getLogger(logger.getName());
   }
 
-  /**
-   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
-   */
-  @Deprecated
-  public static void disableLog(Log4JLogger log) {
-    log.getLogger().setLevel(Level.OFF);
-  }
-
   /**
    * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
    */
@@ -152,45 +132,6 @@ public abstract class GenericTestUtils {
     disableLog(toLog4j(logger));
   }
 
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
-  @SuppressWarnings("unchecked")
-  public static void setLogLevel(Log log, Level level) {
-    // We expect that commons-logging is a wrapper around Log4j.
-    setLogLevel((Log4JLogger) log, level);
-  }
-
-  /**
-   * A helper used in log4j2 migration to accept legacy
-   * org.apache.commons.logging apis.
-   * <p>
-   * And will be removed after migration.
-   *
-   * @param log   a log
-   * @param level level to be set
-   */
-  @Deprecated
-  public static void setLogLevel(Log log, org.slf4j.event.Level level) {
-    setLogLevel(log, Level.toLevel(level.toString()));
-  }
-
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
-  public static void setLogLevel(Log4JLogger log, Level level) {
-    log.getLogger().setLevel(level);
-  }
-
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
   public static void setLogLevel(Logger logger, Level level) {
     logger.setLevel(level);
   }
@@ -535,15 +476,17 @@ public abstract class GenericTestUtils {
     private WriterAppender appender;
     private Logger logger;
 
-    public static LogCapturer captureLogs(Log l) {
-      Logger logger = ((Log4JLogger)l).getLogger();
-      return new LogCapturer(logger);
-    }
-
     public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+      if (logger.getName().equals("root")) {
+        return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
+      }
       return new LogCapturer(toLog4j(logger));
     }
 
+    public static LogCapturer captureLogs(Logger logger) {
+      return new LogCapturer(logger);
+    }
+
     private LogCapturer(Logger logger) {
       this.logger = logger;
       Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
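
LogCapturer now accepts either an slf4j logger (with a special case for the root logger) or a log4j logger directly, which is what the HDFS audit-log tests below rely on. A usage sketch (the logger name and message are illustrative):

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CaptureSketch {
      public static void main(String[] args) {
        Logger log = LoggerFactory.getLogger("org.apache.hadoop.example");
        GenericTestUtils.LogCapturer capturer =
            GenericTestUtils.LogCapturer.captureLogs(log);
        log.info("hello");
        // getOutput() returns everything appended since capture began.
        System.out.println(capturer.getOutput().contains("hello"));
        capturer.stopCapturing();
      }
    }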

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java

@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.util;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -43,7 +43,7 @@ public class TestJarFinder {
   public void testJar() throws Exception {
 
     //picking a class that is for sure in a JAR in the classpath
-    String jar = JarFinder.getJar(LogFactory.class);
+    String jar = JarFinder.getJar(LoggerFactory.class);
     Assert.assertTrue(new File(jar).exists());
   }
 

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java

@@ -32,9 +32,9 @@ public class TestSignalLogger {
   @Test(timeout=60000)
   public void testInstall() throws Exception {
     Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
-    SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+    SignalLogger.INSTANCE.register(LOG);
     try {
-      SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+      SignalLogger.INSTANCE.register(LOG);
       Assert.fail("expected IllegalStateException from double registration");
     } catch (IllegalStateException e) {
       // fall through

+ 0 - 5
hadoop-common-project/hadoop-nfs/pom.xml

@@ -63,11 +63,6 @@
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
       <artifactId>javax.servlet-api</artifactId>

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>
       <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-io</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml

@@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>
       <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -2054,7 +2054,7 @@ public class TestRouterRpc {
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditlog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2092,7 +2092,7 @@ public class TestRouterRpc {
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2127,7 +2127,7 @@ public class TestRouterRpc {
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java

@@ -440,7 +440,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -117,11 +117,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-io</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>

+ 7 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java

@@ -31,8 +31,6 @@ import javax.management.MBeanServer;
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -58,13 +56,12 @@ public class MetricsLoggerTask implements Runnable {
     }
   }
 
-  private Log metricsLog;
+  private org.apache.log4j.Logger metricsLog;
   private String nodeName;
   private short maxLogLineLength;
 
-  public MetricsLoggerTask(Log metricsLog, String nodeName,
-      short maxLogLineLength) {
-    this.metricsLog = metricsLog;
+  public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) {
+    this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog);
     this.nodeName = nodeName;
     this.maxLogLineLength = maxLogLineLength;
   }
@@ -118,13 +115,8 @@ public class MetricsLoggerTask implements Runnable {
         .substring(0, maxLogLineLength) + "...");
   }
 
-  private static boolean hasAppenders(Log logger) {
-    if (!(logger instanceof Log4JLogger)) {
-      // Don't bother trying to determine the presence of appenders.
-      return true;
-    }
-    Log4JLogger log4JLogger = ((Log4JLogger) logger);
-    return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
+  private static boolean hasAppenders(org.apache.log4j.Logger logger) {
+    return logger.getAllAppenders().hasMoreElements();
   }
 
   /**
@@ -150,13 +142,8 @@ public class MetricsLoggerTask implements Runnable {
    * Make the metrics logger async and add all pre-existing appenders to the
    * async appender.
    */
-  public static void makeMetricsLoggerAsync(Log metricsLog) {
-    if (!(metricsLog instanceof Log4JLogger)) {
-      LOG.warn("Metrics logging will not be async since "
-          + "the logger is not log4j");
-      return;
-    }
-    org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
+  public static void makeMetricsLoggerAsync(String metricsLog) {
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog);
     logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
 
     @SuppressWarnings("unchecked")
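
Callers now hand the task a logger name instead of a commons-logging Log, and the task resolves the log4j logger itself; the DataNode and NameNode diffs below follow this pattern. A scheduling sketch (the name and line-length mirror the DataNode usage; the 600-second period is an assumed example value):

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;

    class MetricsLogSketch {
      public static void main(String[] args) {
        // Route the named log4j logger's appenders through an AsyncAppender
        // so metrics dumps do not block on I/O.
        MetricsLoggerTask.makeMetricsLoggerAsync("DataNodeMetricsLog");
        ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1);
        timer.scheduleWithFixedDelay(
            new MetricsLoggerTask("DataNodeMetricsLog", "DataNode", (short) 0),
            600, 600, TimeUnit.SECONDS);
      }
    }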

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -35,7 +35,6 @@ import java.util.Queue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.StorageType;
@@ -73,7 +72,7 @@ import org.slf4j.Logger;
  **/
 class BlockReceiver implements Closeable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
 
   @VisibleForTesting
   static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
@@ -1398,7 +1397,7 @@ class BlockReceiver implements Closeable {
     public void run() {
       datanode.metrics.incrDataNodePacketResponderCount();
       boolean lastPacketInBlock = false;
-      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+      final long startTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
       while (isRunning() && !lastPacketInBlock) {
         long totalAckTimeNanos = 0;
         boolean isInterrupted = false;
@@ -1553,7 +1552,7 @@ class BlockReceiver implements Closeable {
       // Hold a volume reference to finalize block.
       try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
         BlockReceiver.this.close();
-        endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+        endTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
         block.setNumBytes(replicaInfo.getNumBytes());
         datanode.data.finalizeBlock(block, dirSyncOnFinalize);
       }
@@ -1564,11 +1563,11 @@ class BlockReceiver implements Closeable {
       
       datanode.closeBlock(block, null, replicaInfo.getStorageUuid(),
           replicaInfo.isOnTransientStorage());
-      if (ClientTraceLog.isInfoEnabled() && isClient) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled() && isClient) {
         long offset = 0;
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
             .getBlockPoolId());
-        ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
+        CLIENT_TRACE_LOG.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
             myAddr, replicaInfo.getVolume(), block.getNumBytes(),
             "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(),
             block, endTime - startTime));

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -32,7 +32,6 @@ import java.nio.channels.FileChannel;
 import java.util.Arrays;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -103,7 +102,7 @@ import org.slf4j.Logger;
  */
 class BlockSender implements java.io.Closeable {
   static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
   private static final boolean is32Bit = 
       System.getProperty("sun.arch.data.model").equals("32");
   /**
@@ -784,7 +783,7 @@ class BlockSender implements java.io.Closeable {
     // Trigger readahead of beginning of file if configured.
     manageOsCache();
 
-    final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
+    final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? System.nanoTime() : 0;
     try {
       int maxChunksPerPacket;
       int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@@ -831,9 +830,9 @@ class BlockSender implements java.io.Closeable {
         sentEntireByteRange = true;
       }
     } finally {
-      if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
+      if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
+        CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead,
             initialOffset, endTime - startTime));
       }
       close();

+ 6 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -140,8 +140,6 @@ import javax.annotation.Nullable;
 import javax.management.ObjectName;
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -318,9 +316,9 @@ public class DataNode extends ReconfigurableBase
         ", srvID: %s" +  // DatanodeRegistration
         ", blockid: %s" + // block id
         ", duration(ns): %s";  // duration time
-        
-  static final Log ClientTraceLog =
-    LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+
+  static final Logger CLIENT_TRACE_LOG =
+      LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace");
   
   private static final String USAGE =
       "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
@@ -360,7 +358,7 @@ public class DataNode extends ReconfigurableBase
               FS_GETSPACEUSED_JITTER_KEY,
               FS_GETSPACEUSED_CLASSNAME));
 
-  public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";
 
   private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace.";
   private final FileIoProvider fileIoProvider;
@@ -4060,12 +4058,12 @@ public class DataNode extends ReconfigurableBase
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
         TimeUnit.SECONDS);
   }

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
@@ -29,11 +27,14 @@ import java.util.HashMap;
 import java.util.Stack;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Class for maintain a set of lock for fsDataSetImpl.
  */
 public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetLock> {
-  public static final Log LOG = LogFactory.getLog(DataSetLockManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(DataSetLockManager.class);
   private final HashMap<String, TrackLog> threadCountMap = new HashMap<>();
   private final LockMap lockMap = new LockMap();
   private boolean isFair = true;

+ 12 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 import javax.crypto.SecretKey;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -105,7 +104,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
  */
 class DataXceiver extends Receiver implements Runnable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
   
   private Peer peer;
   private final String remoteAddress; // address of remote side
@@ -426,10 +425,10 @@ class DataXceiver extends Receiver implements Runnable {
             registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
             .getBlockPoolId());
-        BlockSender.ClientTraceLog.info(String.format(
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
             " blockid: %s, srvID: %s, success: %b",
             blk.getBlockId(), dnR.getDatanodeUuid(), success));
@@ -466,8 +465,8 @@ class DataXceiver extends Receiver implements Runnable {
       bld.build().writeDelimitedTo(socketOut);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
-        BlockSender.ClientTraceLog.info(String.format(
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
             " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
             slotId.getShmId().getHi(), slotId.getShmId().getLo(),
@@ -526,9 +525,9 @@ class DataXceiver extends Receiver implements Runnable {
       sendShmSuccessResponse(sock, shmInfo);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         if (success) {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
               "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
               "op: REQUEST_SHORT_CIRCUIT_SHM," +
               " shmId: %016x%016x, srvID: %s, success: true",
@@ -536,7 +535,7 @@ class DataXceiver extends Receiver implements Runnable {
               shmInfo.getShmId().getLo(),
               datanode.getDatanodeUuid()));
         } else {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
               "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
               "op: REQUEST_SHORT_CIRCUIT_SHM, " +
               "shmId: n/a, srvID: %s, success: false",
@@ -587,13 +586,10 @@ class DataXceiver extends Receiver implements Runnable {
     BlockSender blockSender = null;
     DatanodeRegistration dnR = 
       datanode.getDNRegistrationForBP(block.getBlockPoolId());
-    final String clientTraceFmt =
-      clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
-        ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
-            "", "%d", "HDFS_READ", clientName, "%d",
-            dnR.getDatanodeUuid(), block, "%d")
-        : dnR + " Served block " + block + " to " +
-            remoteAddress;
+    final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ?
+        String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ",
+            clientName, "%d", dnR.getDatanodeUuid(), block, "%d") :
+        dnR + " Served block " + block + " to " + remoteAddress;
 
     try {
       try {

+ 6 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -185,9 +185,6 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -405,7 +402,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final String contextFieldSeparator;
 
   boolean isAuditEnabled() {
-    return (!isDefaultAuditLogger || auditLog.isInfoEnabled())
+    return (!isDefaultAuditLogger || AUDIT_LOG.isInfoEnabled())
         && !auditLoggers.isEmpty();
   }
 
@@ -491,8 +488,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * perm=&lt;permissions (optional)&gt;
    * </code>
    */
-  public static final Log auditLog = LogFactory.getLog(
-      FSNamesystem.class.getName() + ".audit");
+  public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit");
 
   private final int maxCorruptFileBlocksReturn;
   private final boolean isPermissionEnabled;
@@ -8783,8 +8779,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         FileStatus status, CallerContext callerContext, UserGroupInformation ugi,
         DelegationTokenSecretManager dtSecretManager) {
 
-      if (auditLog.isDebugEnabled() ||
-          (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
+      if (AUDIT_LOG.isDebugEnabled() ||
+          (AUDIT_LOG.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
         final StringBuilder sb = STRING_BUILDER.get();
         src = escapeJava(src);
         dst = escapeJava(dst);
@@ -8853,16 +8849,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     public void logAuditMessage(String message) {
-      auditLog.info(message);
+      AUDIT_LOG.info(message);
     }
   }
 
   private static void enableAsyncAuditLog(Configuration conf) {
-    if (!(auditLog instanceof Log4JLogger)) {
-      LOG.warn("Log4j is required to enable async auditlog");
-      return;
-    }
-    Logger logger = ((Log4JLogger)auditLog).getLogger();
+    Logger logger = AUDIT_LOG;
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     // failsafe against trying to async it more than once

+ 4 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java

@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -125,15 +122,10 @@ public class FsImageValidation {
     }
 
     static void setLogLevel(Class<?> clazz, Level level) {
-      final Log log = LogFactory.getLog(clazz);
-      if (log instanceof Log4JLogger) {
-        final org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
-        logger.setLevel(level);
-        LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}",
-            clazz.getName(), level, logger.getEffectiveLevel());
-      } else {
-        LOG.warn("Failed setLogLevel {} to {}", clazz.getName(), level);
-      }
+      final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+      logger.setLevel(level);
+      LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
+          logger.getEffectiveLevel());
     }
 
     static String toCommaSeparatedNumber(long n) {

+ 3 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -25,8 +25,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Preconditions;
 
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -427,8 +425,7 @@ public class NameNode extends ReconfigurableBase implements
 
   private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
 
-  public static final Log MetricsLog =
-      LogFactory.getLog("NameNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "NameNodeMetricsLog";
 
   protected FSNamesystem namesystem; 
   protected final NamenodeRole role;
@@ -949,13 +946,13 @@ public class NameNode extends ReconfigurableBase implements
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
         false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "NameNode", (short) 128),
         metricsLoggerPeriodSec,
         metricsLoggerPeriodSec,

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java

@@ -21,8 +21,6 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,6 +39,8 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.junit.Assert.*;
@@ -52,7 +52,7 @@ import static org.junit.Assert.*;
  */
 @RunWith(Parameterized.class)
 public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
-  public static final Log LOG = LogFactory.getLog(TestPermission.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);
 
   private HdfsConfiguration conf;
   private MiniDFSCluster cluster;
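
Note: the test-class hunks in this commit all follow the same mechanical pattern: drop the `Log`/`LogFactory` imports, import `org.slf4j.Logger`/`LoggerFactory`, and swap the field declaration. For reference (class name illustrative):

```java
// Before (commons-logging):
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   public static final Log LOG = LogFactory.getLog(SomeTest.class);

// After (slf4j):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SomeTest {
  public static final Logger LOG = LoggerFactory.getLogger(SomeTest.class);
}
```

A side benefit picked up by the main-code hunks is slf4j's `{}` parameterized logging (see the `setLogLevel` info line above), which avoids string concatenation when the level is disabled.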

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java

@@ -190,7 +190,7 @@ public class TestDFSRename {
       Path path = new Path("/test");
       dfs.mkdirs(path);
       GenericTestUtils.LogCapturer auditLog =
-          GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+          GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       dfs.rename(path, new Path("/dir1"),
           new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
       String auditOut = auditLog.getOutput();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -29,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.IOException;
@@ -47,7 +47,7 @@ import static org.junit.Assert.fail;
  * Tests append on an erasure-coded file.
  */
 public class TestStripedFileAppend {
-  public static final Log LOG = LogFactory.getLog(TestStripedFileAppend.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestStripedFileAppend.class);
 
   static {
     DFSTestUtil.setNameNodeLogLevel(Level.TRACE);

+ 3 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java

@@ -32,8 +32,6 @@ import java.util.Random;
 import java.util.concurrent.TimeoutException;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -135,8 +133,7 @@ public class TestDataNodeMetricsLogger {
   public void testMetricsLoggerIsAsync() throws IOException {
     startDNForTest(true);
     assertNotNull(dn);
-    org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG)
-        .getLogger();
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     assertTrue(appenders.get(0) instanceof AsyncAppender);
@@ -156,7 +153,7 @@ public class TestDataNodeMetricsLogger {
     assertNotNull(dn);
     final PatternMatchingAppender appender = new PatternMatchingAppender(
         "^.*FakeMetric.*$");
-    addAppender(DataNode.METRICS_LOG, appender);
+    addAppender(org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME), appender);
 
     // Ensure that the supplied pattern was matched.
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -169,8 +166,7 @@ public class TestDataNodeMetricsLogger {
     dn.shutdown();
   }
 
-  private void addAppender(Log log, Appender appender) {
-    org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
+  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     ((AsyncAppender) appenders.get(0)).addAppender(appender);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java

@@ -26,10 +26,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.slf4j.event.Level;
 
 import java.net.Inet4Address;
 import java.util.Arrays;
@@ -61,7 +62,7 @@ public class TestAuditLogAtDebug {
                Joiner.on(",").join(debugCommands.get()));
     }
     logger.initialize(conf);
-    GenericTestUtils.setLogLevel(FSNamesystem.auditLog, level);
+    GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, level);
     return spy(logger);
   }
   

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java

@@ -258,7 +258,7 @@ public class TestAuditLogger {
     conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
 
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
-      LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+      LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       cluster.waitClusterUp();
       final FileSystem fs = cluster.getFileSystem();
       final long time = System.currentTimeMillis();
@@ -568,7 +568,7 @@ public class TestAuditLogger {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster1 = new MiniDFSCluster.Builder(conf).build();
     try {
-      LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+      LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       cluster1.waitClusterUp();
       FileSystem fs = cluster1.getFileSystem();
       long time = System.currentTimeMillis();
@@ -585,7 +585,7 @@ public class TestAuditLogger {
     conf.setBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, true);
     MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
     try {
-      LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+      LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       cluster2.waitClusterUp();
       FileSystem fs = cluster2.getFileSystem();
       long time = System.currentTimeMillis();
@@ -606,7 +606,7 @@ public class TestAuditLogger {
     conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
 
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
-      LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+      LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       cluster.waitClusterUp();
       final FileSystem fs = cluster.getFileSystem();
       final long time = System.currentTimeMillis();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java

@@ -93,7 +93,7 @@ public class TestAuditLoggerWithCommands {
     user2 =
         UserGroupInformation.createUserForTesting("theEngineer",
             new String[]{"hadoop"});
-    auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+    auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
     proto = cluster.getNameNodeRpc();
     fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
     fs2 = DFSTestUtil.getFileSystemAs(user2, conf);

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java

@@ -32,7 +32,6 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -130,7 +129,7 @@ public class TestAuditLogs {
     util.createFiles(fs, fileName);
 
     // make sure the appender is what it's supposed to be
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     assertEquals(1, appenders.size());
@@ -283,7 +282,7 @@ public class TestAuditLogs {
 
   /** Sets up log4j logger for auditlogs */
   private void setupAuditLogs() throws IOException {
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     // enable logging now that the test is ready to run
     logger.setLevel(Level.INFO);
   }
@@ -303,7 +302,7 @@ public class TestAuditLogs {
     disableAuditLog();
     PatternLayout layout = new PatternLayout("%m%n");
     RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     logger.addAppender(appender);
   }
 
@@ -319,7 +318,7 @@ public class TestAuditLogs {
     disableAuditLog();
 
     // Close the appenders and force all logs to be flushed
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     Enumeration<?> appenders = logger.getAllAppenders();
     while (appenders.hasMoreElements()) {
       Appender appender = (Appender)appenders.nextElement();
@@ -352,7 +351,7 @@ public class TestAuditLogs {
     disableAuditLog();
 
     // Close the appenders and force all logs to be flushed
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     Enumeration<?> appenders = logger.getAllAppenders();
     while (appenders.hasMoreElements()) {
       Appender appender = (Appender)appenders.nextElement();

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -61,7 +61,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import java.util.function.Supplier;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -252,7 +251,7 @@ public class TestFsck {
     if (file.exists()) {
       file.delete();
     }
-    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    Logger logger = FSNamesystem.AUDIT_LOG;
     logger.removeAllAppenders();
     logger.setLevel(Level.INFO);
     PatternLayout layout = new PatternLayout("%m%n");
@@ -291,7 +290,7 @@ public class TestFsck {
       if (reader != null) {
         reader.close();
       }
-      Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+      Logger logger = FSNamesystem.AUDIT_LOG;
       if (logger != null) {
         logger.removeAllAppenders();
       }

+ 3 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java

@@ -19,8 +19,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.function.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -70,8 +68,7 @@ public class TestNameNodeMetricsLogger {
   @Test
   public void testMetricsLoggerIsAsync() throws IOException {
     makeNameNode(true);
-    org.apache.log4j.Logger logger =
-        ((Log4JLogger) NameNode.MetricsLog).getLogger();
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     assertTrue(appenders.get(0) instanceof AsyncAppender);
@@ -90,7 +87,7 @@ public class TestNameNodeMetricsLogger {
     makeNameNode(true);     // Log metrics early and often.
     final PatternMatchingAppender appender =
         new PatternMatchingAppender("^.*FakeMetric42.*$");
-    addAppender(NameNode.MetricsLog, appender);
+    addAppender(org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME), appender);
 
     // Ensure that the supplied pattern was matched.
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -118,8 +115,7 @@ public class TestNameNodeMetricsLogger {
     return new TestNameNode(conf);
   }
 
-  private void addAppender(Log log, Appender appender) {
-    org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
+  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     ((AsyncAppender) appenders.get(0)).addAppender(appender);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java

@@ -45,7 +45,7 @@ import java.util.function.Supplier;
  */
 public class TestDNFencingWithReplication {
   static {
-    GenericTestUtils.setLogLevel(FSNamesystem.auditLog, Level.WARN);
+    GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN);
     GenericTestUtils.setLogLevel(Server.LOG, Level.ERROR);
     GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.ERROR);
   }

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java

@@ -18,18 +18,18 @@
 
 package org.apache.hadoop.mapreduce.v2.app.speculate.forecast;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.yarn.util.ControlledClock;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.yarn.util.ControlledClock;
 
 /**
  * Testing the statistical model of the simple exponential estimator.
  */
 public class TestSimpleExponentialForecast {
-  private static final Log LOG =
-      LogFactory.getLog(TestSimpleExponentialForecast.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestSimpleExponentialForecast.class);
 
   private static long clockTicks = 1000L;
   private ControlledClock clock;

+ 3 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java

@@ -28,8 +28,6 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -66,6 +64,8 @@ import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test speculation on Mini Cluster.
@@ -73,8 +73,7 @@ import org.junit.runners.Parameterized;
 @Ignore
 @RunWith(Parameterized.class)
 public class TestSpeculativeExecOnCluster {
-  private static final Log LOG = LogFactory
-      .getLog(TestSpeculativeExecOnCluster.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestSpeculativeExecOnCluster.class);
 
   private static final int NODE_MANAGERS_COUNT = 2;
   private static final boolean ENABLE_SPECULATIVE_MAP = true;

+ 0 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml

@@ -132,11 +132,6 @@
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>provided</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop.thirdparty</groupId>
       <artifactId>hadoop-shaded-guava</artifactId>

+ 0 - 4
hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml

@@ -38,10 +38,6 @@
       <groupId>commons-cli</groupId>
       <artifactId>commons-cli</artifactId>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-jobclient</artifactId>

+ 0 - 6
hadoop-project/pom.xml

@@ -121,7 +121,6 @@
     <commons-io.version>2.11.0</commons-io.version>
     <commons-lang3.version>3.12.0</commons-lang3.version>
     <commons-logging.version>1.1.3</commons-logging.version>
-    <commons-logging-api.version>1.1</commons-logging-api.version>
     <commons-math3.version>3.6.1</commons-math3.version>
     <commons-net.version>3.9.0</commons-net.version>
     <commons-text.version>1.10.0</commons-text.version>
@@ -1094,11 +1093,6 @@
           </exclusion>
         </exclusions>
       </dependency>
-      <dependency>
-        <groupId>commons-logging</groupId>
-        <artifactId>commons-logging-api</artifactId>
-        <version>${commons-logging-api.version}</version>
-      </dependency>
       <dependency>
         <groupId>log4j</groupId>
         <artifactId>log4j</artifactId>

+ 0 - 5
hadoop-tools/hadoop-archive-logs/pom.xml

@@ -101,11 +101,6 @@
       <artifactId>commons-io</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>provided</scope>
-    </dependency>
     <dependency>
       <groupId>commons-cli</groupId>
       <artifactId>commons-cli</artifactId>

+ 3 - 3
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java

@@ -30,8 +30,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
 
@@ -39,6 +37,8 @@ import com.microsoft.azure.storage.OperationContext;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.BlobRequestOptions;
 import com.microsoft.azure.storage.blob.PageRange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An input stream that reads file data from a page blob stored
@@ -46,7 +46,7 @@ import com.microsoft.azure.storage.blob.PageRange;
  */
 
 final class PageBlobInputStream extends InputStream {
-  private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class);
+  private static final Logger LOG = LoggerFactory.getLogger(PageBlobInputStream.class);
 
   // The blob we're reading from.
   private final CloudPageBlobWrapper blob;

+ 3 - 4
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java

@@ -39,8 +39,6 @@ import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
 import org.apache.commons.lang3.exception.ExceptionUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.classification.VisibleForTesting;
@@ -48,7 +46,8 @@ import com.microsoft.azure.storage.OperationContext;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.BlobRequestOptions;
 import com.microsoft.azure.storage.blob.CloudPageBlob;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An output stream that writes file data to a page blob stored using ASV's
@@ -120,7 +119,7 @@ final class PageBlobOutputStream extends OutputStream implements Syncable, Strea
   // Whether the stream has been closed.
   private boolean closed = false;
 
-  public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class);
+  public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
 
   // Set the minimum page blob file size to 128MB, which is >> the default
   // block size of 32MB. This default block size is often used as the

+ 3 - 3
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
 import org.apache.hadoop.classification.VisibleForTesting;
@@ -27,6 +25,8 @@ import org.apache.hadoop.classification.VisibleForTesting;
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -58,7 +58,7 @@ public class SelfRenewingLease {
 
   // Time to wait to renew lease in milliseconds
   public static final int LEASE_RENEWAL_PERIOD = 40000;
-  private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SelfRenewingLease.class);
 
   // Used to allocate thread serial numbers in thread name
   private static AtomicInteger threadNumber = new AtomicInteger(0);

+ 3 - 4
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure;
 import java.net.HttpURLConnection;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.microsoft.azure.storage.OperationContext;
@@ -30,6 +28,8 @@ import com.microsoft.azure.storage.RequestResult;
 import com.microsoft.azure.storage.ResponseReceivedEvent;
 import com.microsoft.azure.storage.SendingRequestEvent;
 import com.microsoft.azure.storage.StorageEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
  * Self throttling is implemented by hooking into send & response callbacks 
@@ -63,8 +63,7 @@ import com.microsoft.azure.storage.StorageEvent;
  */
 @InterfaceAudience.Private
 public class SelfThrottlingIntercept {
-  public static final Log LOG = LogFactory
-      .getLog(SelfThrottlingIntercept.class);
+  public static final Logger LOG = LoggerFactory.getLogger(SelfThrottlingIntercept.class);
 
   private final float readFactor;
   private final float writeFactor;

+ 0 - 4
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure;
 import java.net.HttpURLConnection;
 import java.security.InvalidKeyException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.microsoft.azure.storage.Constants.HeaderConstants;
@@ -40,8 +38,6 @@ import com.microsoft.azure.storage.StorageException;
 @InterfaceAudience.Private
 public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> {
 
-  public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class);
-
   private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*";
 
   /**

+ 4 - 3
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java

@@ -20,8 +20,9 @@ package org.apache.hadoop.fs.azure;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.ProviderUtils;
@@ -32,7 +33,7 @@ import org.apache.hadoop.security.ProviderUtils;
  */
 @InterfaceAudience.Private
 public class SimpleKeyProvider implements KeyProvider {
-  private static final Log LOG = LogFactory.getLog(SimpleKeyProvider.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SimpleKeyProvider.class);
 
   protected static final String KEY_ACCOUNT_KEY_PREFIX =
       "fs.azure.account.key.";

+ 1 - 5
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs.azure.metrics;
 import java.util.ArrayList;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
@@ -31,9 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public final class BandwidthGaugeUpdater {
-  public static final Log LOG = LogFactory
-      .getLog(BandwidthGaugeUpdater.class);
-  
+
   public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater";
   
   private static final int DEFAULT_WINDOW_SIZE_MS = 1000;

+ 0 - 4
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java

@@ -20,8 +20,6 @@ package org.apache.hadoop.fs.azure.metrics;
 
 import java.net.HttpURLConnection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.microsoft.azure.storage.Constants.HeaderConstants;
@@ -38,8 +36,6 @@ import com.microsoft.azure.storage.StorageEvent;
 @InterfaceAudience.Private
 public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> {
 
-  public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class);
-
   private final AzureFileSystemInstrumentation instrumentation;
   private final BandwidthGaugeUpdater blockUploadGaugeUpdater;
 

+ 3 - 3
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java

@@ -28,7 +28,6 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -41,6 +40,8 @@ import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
@@ -70,8 +71,7 @@ public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
     fs.initialize(uri, conf);
 
     // Capture logs
-    logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
-        .getRootLogger()));
+    logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
   }
 
   /*

+ 6 - 6
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java

@@ -21,12 +21,13 @@ package org.apache.hadoop.fs.azure;
 import java.net.URI;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
+
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test to validate Azure storage client-side logging. Tests work only when
@@ -94,8 +95,8 @@ public class ITestNativeAzureFileSystemClientLogging
   @Test
   public void testLoggingEnabled() throws Exception {
 
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
+    LogCapturer logs =
+        LogCapturer.captureLogs(LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME));
 
     // Update configuration based on the Test.
     updateFileSystemConfiguration(true);
@@ -116,8 +117,7 @@ public class ITestNativeAzureFileSystemClientLogging
   @Test
   public void testLoggingDisabled() throws Exception {
 
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
+    LogCapturer logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
 
     // Update configuration based on the Test.
     updateFileSystemConfiguration(false);
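
Note: both Azure tests now capture root-logger output through the slf4j facade instead of wrapping the log4j root logger in a `Log4JLogger`. A minimal usage sketch of the pattern (only `captureLogs`/`getOutput` appear in this diff; `stopCapturing` is an assumption about the GenericTestUtils helper):

```java
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class RootLogCaptureSketch {
  void demo() {
    // Logger.ROOT_LOGGER_NAME is the slf4j constant for the root logger ("ROOT").
    LogCapturer logs =
        LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
    LoggerFactory.getLogger(getClass()).info("hello");
    String output = logs.getOutput(); // captured text, should contain "hello"
    logs.stopCapturing();             // assumed teardown to detach the capture appender
  }
}
```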

+ 3 - 3
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java

@@ -30,8 +30,6 @@ import java.util.Date;
 import java.util.EnumSet;
 import java.util.TimeZone;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -49,6 +47,8 @@ import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.readStringFromFile;
 import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.writeStringToFile;
@@ -73,7 +73,7 @@ public abstract class NativeAzureFileSystemBaseTest
   private static final EnumSet<XAttrSetFlag> CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE);
   private static final EnumSet<XAttrSetFlag> REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE);
 
-  public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
+  public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystemBaseTest.class);
   protected NativeAzureFileSystem fs;
 
   @Override

+ 3 - 4
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java

@@ -23,10 +23,10 @@ import java.nio.charset.Charset;
 
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException;
@@ -39,8 +39,7 @@ import static org.junit.Assert.assertEquals;
  *
  */
 public class TestShellDecryptionKeyProvider {
-  public static final Log LOG = LogFactory
-      .getLog(TestShellDecryptionKeyProvider.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestShellDecryptionKeyProvider.class);
   private static final File TEST_ROOT_DIR = new File(System.getProperty(
       "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
 

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java

@@ -34,8 +34,6 @@ import org.apache.hadoop.yarn.appcatalog.model.Application;
 import org.apache.hadoop.yarn.appcatalog.utils.RandomWord;
 import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException;
 import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrQuery.ORDER;
@@ -48,13 +46,15 @@ import org.apache.solr.common.SolrInputDocument;
 
 import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Driver class for accessing Solr.
  */
 public class AppCatalogSolrClient {
 
-  private static final Log LOG = LogFactory.getLog(AppCatalogSolrClient.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AppCatalogSolrClient.class);
   private static String urlString;
 
   public AppCatalogSolrClient() {

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.appcatalog.application;
 import java.io.IOException;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.appcatalog.model.AppEntry;
@@ -39,13 +37,15 @@ import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.config.ClientConfig;
 import com.sun.jersey.api.client.config.DefaultClientConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Driver class for calling YARN Resource Manager REST API.
  */
 public class YarnServiceClient {
 
-  private static final Log LOG = LogFactory.getLog(YarnServiceClient.class);
+  private static final Logger LOG = LoggerFactory.getLogger(YarnServiceClient.class);
   private static Configuration conf = new Configuration();
   private static ClientConfig getClientConfig() {
     ClientConfig config = new DefaultClientConfig();

+ 3 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java

@@ -21,8 +21,6 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -81,6 +79,8 @@ import java.util.regex.Matcher;
 
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN;
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP;
@@ -136,8 +136,7 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 @InterfaceStability.Unstable
 public class RuncContainerRuntime extends OCIContainerRuntime {
 
-  private static final Log LOG = LogFactory.getLog(
-      RuncContainerRuntime.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RuncContainerRuntime.class);
 
   @InterfaceAudience.Private
   private static final String RUNTIME_TYPE = "RUNC";

+ 3 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java

@@ -20,8 +20,6 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,6 +43,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL;
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR;
@@ -78,8 +78,7 @@ public class ImageTagToManifestPlugin extends AbstractService
   private String manifestDir;
   private String localImageTagToHashFile;
 
-  private static final Log LOG = LogFactory.getLog(
-      ImageTagToManifestPlugin.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ImageTagToManifestPlugin.class);
 
   private static final int SHA256_HASH_LENGTH = 64;
   private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+";

+ 4 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -27,11 +25,14 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * JMX bean for RM info.
  */
 public class RMInfo implements RMInfoMXBean {
-  private static final Log LOG = LogFactory.getLog(RMNMInfo.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RMNMInfo.class);
   private ResourceManager resourceManager;
   private ObjectName rmStatusBeanName;
 

+ 3 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java

@@ -22,6 +22,8 @@ import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import com.sun.jersey.api.json.JSONConfiguration;
 import com.sun.jersey.api.json.JSONJAXBContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.*;
 
@@ -29,8 +31,6 @@ import javax.ws.rs.ext.ContextResolver;
 import javax.ws.rs.ext.Provider;
 import javax.xml.bind.JAXBContext;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
@@ -41,8 +41,7 @@ import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
 @Provider
 public class JAXBContextResolver implements ContextResolver<JAXBContext> {
 
-  private static final Log LOG =
-      LogFactory.getLog(JAXBContextResolver.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(JAXBContextResolver.class.getName());
 
   private final Map<Class, JAXBContext> typesContextMap;
 

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java

@@ -22,8 +22,6 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.C
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -49,14 +47,16 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
 public class TestCapacitySchedulerMultiNodesWithPreemption {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestCapacitySchedulerMultiNodesWithPreemption.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestCapacitySchedulerMultiNodesWithPreemption.class);
   private CapacitySchedulerConfiguration conf;
   private static final String POLICY_CLASS_NAME =
       "org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement."

+ 16 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml

@@ -104,6 +104,10 @@
           <groupId>com.sun.jersey</groupId>
           <artifactId>jersey-json</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -336,6 +340,10 @@
           <groupId>com.sun.jersey</groupId>
           <artifactId>jersey-json</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -351,6 +359,10 @@
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-hdfs-client</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -367,6 +379,10 @@
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-hdfs-client</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 

+ 7 - 0
pom.xml

@@ -288,6 +288,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
                       <bannedImport>org.glassfish.grizzly.**</bannedImport>
                     </bannedImports>
                   </restrictImports>
+                  <restrictImports>
+                    <includeTestCode>true</includeTestCode>
+                    <reason>Use slf4j based Logger</reason>
+                    <bannedImports>
+                      <bannedImport>org.apache.commons.logging.**</bannedImport>
+                    </bannedImports>
+                  </restrictImports>
                 </rules>
               </configuration>
             </execution>
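
Note: this enforcer rule is what makes the cleanup stick going forward: any new `org.apache.commons.logging.**` import, in main or test code (`includeTestCode` is true), fails the build when the enforcer runs. A file written like the "Before" half of the sketch earlier in this diff would be rejected; the slf4j form is the only one that passes.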