
HDFS-13849. Migrate logging to slf4j in hadoop-hdfs-httpfs, hadoop-hdfs-nfs, hadoop-hdfs-rbf, hadoop-hdfs-native-client. Contributed by Ian Pickering.

Giovanni Matteo Fumarola, 6 years ago
commit 7b1fa5693e
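
The change is mechanical across the files below: each commons-logging declaration is swapped for its slf4j equivalent. A minimal before/after sketch of the pattern (MyClass is a hypothetical stand-in for the classes touched in this commit):

  // Before: commons-logging
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class MyClass {
    private static final Log LOG = LogFactory.getLog(MyClass.class);
  }

  // After: slf4j
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class MyClass {
    private static final Logger LOG = LoggerFactory.getLogger(MyClass.class);
  }

At runtime slf4j is only a facade, so these classes log through whichever binding is on the classpath and existing log4j configuration continues to apply.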

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/TestFuseDFS.java

@@ -22,8 +22,8 @@ import java.util.ArrayList;
 import java.util.concurrent.atomic.*;
 
 import org.apache.log4j.Level;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
@@ -48,7 +48,7 @@ public class TestFuseDFS {
   private static Runtime r;
   private static String mountPoint;
 
-  private static final Log LOG = LogFactory.getLog(TestFuseDFS.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestFuseDFS.class);
   {
     GenericTestUtils.setLogLevel(LOG, Level.ALL);
   }
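
Note that the test keeps its org.apache.log4j.Level import: Hadoop's GenericTestUtils.setLogLevel has an overload that accepts an slf4j Logger and adjusts the backing log4j logger by name. A rough sketch of that bridging (assumed behavior, not the commit's code):

  // Assumed shape of the slf4j-to-log4j bridge behind setLogLevel:
  // look up the log4j logger with the same name and set its level there.
  static void setLogLevel(org.slf4j.Logger logger, org.apache.log4j.Level level) {
    org.apache.log4j.LogManager.getLogger(logger.getName()).setLevel(level);
  }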

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -26,8 +26,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.HashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -61,7 +61,8 @@ import com.google.common.annotations.VisibleForTesting;
  * RPC program corresponding to mountd daemon. See {@link Mountd}.
  */
 public class RpcProgramMountd extends RpcProgram implements MountInterface {
-  private static final Log LOG = LogFactory.getLog(RpcProgramMountd.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RpcProgramMountd.class);
   public static final int PROGRAM = 100005;
   public static final int VERSION_1 = 1;
   public static final int VERSION_2 = 2;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java

@@ -22,8 +22,8 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is a thread pool to easily schedule async data operations. Current
@@ -31,7 +31,7 @@ import org.apache.commons.logging.LogFactory;
  * for readahead operations too.
  */
 public class AsyncDataService {
-  static final Log LOG = LogFactory.getLog(AsyncDataService.class);
+  static final Logger LOG = LoggerFactory.getLogger(AsyncDataService.class);
 
   // ThreadPool core pool size
   private static final int CORE_THREADS_PER_VOLUME = 1;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -1211,11 +1211,11 @@ class OpenFileCtx {
 
       LOG.info("Clean up open file context for fileId: {}",
           latestAttr.getFileId());
-      cleanup();
+      cleanupWithLogger();
     }
   }
 
-  synchronized void cleanup() {
+  synchronized void cleanupWithLogger() {
     if (!activeState) {
       LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
       return;
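
The hunk above also shows slf4j's parameterized logging: the "{}" placeholder defers message formatting until the level is known to be enabled, replacing the concatenation-plus-guard idiom typical of commons-logging code. A minimal sketch (fileId is hypothetical here):

  // commons-logging idiom: guard so the string isn't built when INFO is off
  if (LOG.isInfoEnabled()) {
    LOG.info("Clean up open file context for fileId: " + fileId);
  }

  // slf4j idiom: formatting only happens if INFO is enabled
  LOG.info("Clean up open file context for fileId: {}", fileId);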

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java

@@ -22,8 +22,8 @@ import java.util.Iterator;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
@@ -39,7 +39,8 @@ import com.google.common.collect.Maps;
  * used to maintain the writing context for a single file.
  */
 class OpenFileCtxCache {
-  private static final Log LOG = LogFactory.getLog(OpenFileCtxCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpenFileCtxCache.class);
   // Insert and delete with openFileMap are synced
   private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
       .newConcurrentMap();
@@ -138,7 +139,7 @@ class OpenFileCtxCache {
     
     // Cleanup the old stream outside the lock
     if (toEvict != null) {
-      toEvict.cleanup();
+      toEvict.cleanupWithLogger();
     }
     return true;
   }
@@ -178,7 +179,7 @@ class OpenFileCtxCache {
 
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : ctxToRemove) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }
 
@@ -214,7 +215,7 @@ class OpenFileCtxCache {
 
     // Invoke the cleanup outside the lock
     for (OpenFileCtx ofc : cleanedContext) {
-      ofc.cleanup();
+      ofc.cleanupWithLogger();
     }
   }
 

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java

@@ -22,8 +22,8 @@ import java.net.SocketException;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 
@@ -37,7 +37,8 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
  * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880
  */
 public class PrivilegedNfsGatewayStarter implements Daemon {
-  static final Log LOG = LogFactory.getLog(PrivilegedNfsGatewayStarter.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(PrivilegedNfsGatewayStarter.class);
   private String[] args = null;
   private DatagramSocket registrationSocket = null;
   private Nfs3 nfs3Server = null;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java

@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
  * xid and reply status.
  */
 class WriteCtx {
-  public static final Log LOG = LogFactory.getLog(WriteCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteCtx.class);
   
   /**
    * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -51,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting;
  * Manage the writes and responds asynchronously.
  */
 public class WriteManager {
-  public static final Log LOG = LogFactory.getLog(WriteManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(WriteManager.class);
 
   private final NfsConfiguration config;
   private final IdMappingServiceProvider iug;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.io.IOException;
 import java.net.InetAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
@@ -35,7 +35,7 @@ import static org.junit.Assert.assertTrue;
 
 public class TestMountd {
 
-  public static final Log LOG = LogFactory.getLog(TestMountd.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestMountd.class);
 
   @Test
   public void testStart() throws IOException {

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.nfs;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
@@ -51,7 +51,8 @@ import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.MessageEvent;
 
 public class TestOutOfOrderWrite {
-  public final static Log LOG = LogFactory.getLog(TestOutOfOrderWrite.class);
+  public final static Logger LOG =
+      LoggerFactory.getLogger(TestOutOfOrderWrite.class);
 
   static FileHandle handle = null;
   static Channel channel;
@@ -179,4 +180,4 @@ public class TestOutOfOrderWrite {
 
     // TODO: convert to Junit test, and validate result automatically
   }
-}
+}

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterPermissionChecker.java

@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
@@ -35,7 +35,8 @@ import org.apache.hadoop.security.UserGroupInformation;
  * Class that helps in checking permissions in Router-based federation.
  */
 public class RouterPermissionChecker extends FSPermissionChecker {
-  static final Log LOG = LogFactory.getLog(RouterPermissionChecker.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(RouterPermissionChecker.class);
 
   /** Mount table default permission. */
   public static final short MOUNT_TABLE_PERMISSION_DEFAULT = 00755;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.federation.store;
 
 import java.lang.reflect.Constructor;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 @InterfaceStability.Evolving
 public abstract class RecordStore<R extends BaseRecord> {
 
-  private static final Log LOG = LogFactory.getLog(RecordStore.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RecordStore.class);
 
 
   /** Class of the record stored in this State Store. */