
HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode and file being read. Contributed by Anubhav Dhoot.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1602538 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 11 years ago
parent
commit
3f5e04946d
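
The new trace messages are off by default. A minimal way to surface them, assuming the stock log4j.properties that ships with Hadoop and that the class's LOG follows the usual LogFactory.getLog(RemoteBlockReader2.class) pattern (so the logger name is the fully qualified class name):

    log4j.logger.org.apache.hadoop.hdfs.RemoteBlockReader2=TRACE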

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -440,6 +440,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6470. TestBPOfferService.testBPInitErrorHandling is flaky.
     (Ming Ma via wang)
 
+    HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode
+    and file being read. (Anubhav Dhoot via atm)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 import java.util.EnumSet;
+import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -133,9 +134,22 @@ public class RemoteBlockReader2  implements BlockReader {
   public synchronized int read(byte[] buf, int off, int len) 
                                throws IOException {
 
+    UUID randomId = null;
+    if (LOG.isTraceEnabled()) {
+      randomId = UUID.randomUUID();
+      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+        randomId.toString(), this.filename,
+        this.datanodeID.getHostName()));
+    }
+
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
       readNextPacket();
     }
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Finishing read #" + randomId);
+    }
+
     if (curDataSlice.remaining() == 0) {
       // we're at EOF now
       return -1;
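
For reference, a self-contained sketch of the pattern the patch applies: generate a correlation ID only when TRACE is enabled, and stamp it on both the start and finish messages of one logical read so the pair can be matched up in an interleaved, multi-threaded log. The class and helper names below (TraceCorrelationDemo, readChunk, doRead) are hypothetical; only the guard/UUID structure mirrors the diff above.

    import java.util.UUID;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class TraceCorrelationDemo {
      private static final Log LOG = LogFactory.getLog(TraceCorrelationDemo.class);

      // Tag both ends of one logical read with the same random ID so the
      // start/finish pairs can be correlated across threads in the log.
      static int readChunk(String filename, String datanodeHost) {
        UUID randomId = null;
        if (LOG.isTraceEnabled()) {
          randomId = UUID.randomUUID();
          LOG.trace(String.format("Starting read #%s file %s from datanode %s",
              randomId, filename, datanodeHost));
        }

        int bytesRead = doRead(); // stand-in for readNextPacket() and the slice copy

        if (LOG.isTraceEnabled()) {
          LOG.trace("Finishing read #" + randomId);
        }
        return bytesRead;
      }

      private static int doRead() {
        return 42; // placeholder for the actual packet read
      }

      public static void main(String[] args) {
        readChunk("/user/example/part-00000", "dn1.example.com");
      }
    }

Because the UUID is created inside the isTraceEnabled() guard, the hot read path pays no allocation or formatting cost when logging is at INFO or above.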