
HADOOP-8491. Check for short writes when using FileChannel#write and related methods. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1349025 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins, 13 years ago
commit 9d83af831b
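For context: the WritableByteChannel#write and FileChannel#write contracts only guarantee that *some* bytes are written per call, so a single call can leave bytes behind (a "short write"). Below is a minimal standalone sketch, not part of this commit, contrasting the buggy pattern with the retry loop this change introduces; the class name and the file name "demo_file" are hypothetical.

    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    public class ShortWriteDemo {
      public static void main(String[] args) throws Exception {
        RandomAccessFile raf = new RandomAccessFile("demo_file", "rw");
        try {
          FileChannel fc = raf.getChannel();
          ByteBuffer buf = ByteBuffer.wrap(new byte[8192]);

          // Buggy pattern: assumes one call drains the buffer.
          // fc.write(buf);          // may return with buf.remaining() > 0

          // Fixed pattern, as introduced in IOUtils#writeFully below:
          while (buf.remaining() > 0) {
            fc.write(buf);          // each call advances buf's position
          }
        } finally {
          raf.close();
        }
      }
    }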

+ 6 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -80,6 +80,12 @@ Release 2.0.1-alpha - UNRELEASED
 
     HADOOP-8485. Don't hardcode "Apache Hadoop 0.23" in the docs. (eli)
 
+    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8491. Check for short writes when using FileChannel#write
+    and related methods. (Colin Patrick McCabe via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -112,9 +118,6 @@ Release 2.0.1-alpha - UNRELEASED
     
     HADOOP-8405. ZKFC tests leak ZK instances. (todd)
 
-    HADOOP-8488. test-patch.sh gives +1 even if the native build
-    fails. (Colin Patrick McCabe via eli)
-
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import java.net.Socket;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
 
@@ -245,4 +248,35 @@ public class IOUtils {
     public void write(int b) throws IOException {
     }
   }  
+  
+  /**
+   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
+   * 
+   * @param bc               The WritableByteChannel to write to.
+   * @param buf              The input buffer.
+   * @throws IOException     On I/O error.
+   */
+  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
+      throws IOException {
+    do {
+      bc.write(buf);
+    } while (buf.remaining() > 0);
+  }
+
+  /**
+   * Write a ByteBuffer to a FileChannel at a given offset, 
+   * handling short writes.
+   * 
+   * @param fc               The FileChannel to write to.
+   * @param buf              The input buffer.
+   * @param offset           The offset in the file to start writing at.
+   * @throws IOException     On I/O error.
+   */
+  public static void writeFully(FileChannel fc, ByteBuffer buf,
+      long offset) throws IOException {
+    do {
+      offset += fc.write(buf, offset);
+    } while (buf.remaining() > 0);
+  }
 }
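A short usage sketch for the two new helpers (the path "/tmp/example" is illustrative only; it assumes the imports shown in the diff above). Note that the positional overload, like FileChannel#write(ByteBuffer, long), does not modify the channel's position:

    static void example() throws IOException {
      RandomAccessFile raf = new RandomAccessFile("/tmp/example", "rw");
      try {
        FileChannel fc = raf.getChannel();
        ByteBuffer buf = ByteBuffer.wrap("hello, world".getBytes("UTF-8"));
        IOUtils.writeFully(fc, buf);        // writes at, and advances, the channel position
        buf.rewind();
        IOUtils.writeFully(fc, buf, 100L);  // writes at offset 100; channel position unchanged
      } finally {
        raf.close();
      }
    }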

+ 43 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java

@@ -21,9 +21,13 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -32,7 +36,8 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-
+  private static final String TEST_FILE_NAME = "test_file";
+  
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -110,4 +115,41 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
   
+  @Test
+  public void testWriteFully() throws IOException {
+    final int INPUT_BUFFER_LEN = 10000;
+    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+    byte[] input = new byte[INPUT_BUFFER_LEN];
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte)(i & 0xff);
+    }
+    byte[] output = new byte[input.length];
+    
+    RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+    try {
+      FileChannel fc = raf.getChannel();
+      ByteBuffer buf = ByteBuffer.wrap(input);
+      IOUtils.writeFully(fc, buf);
+      raf.seek(0);
+      raf.readFully(output);
+      for (int i = 0; i < input.length; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      buf.rewind();
+      IOUtils.writeFully(fc, buf, HALFWAY);
+      raf.seek(0);
+      raf.readFully(output);
+      for (int i = 0; i < HALFWAY; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      for (int i = HALFWAY; i < input.length; i++) {
+        assertEquals(input[i - HALFWAY], output[i]);
+      }
+    } finally {
+      raf.close();
+      File f = new File(TEST_FILE_NAME);
+      if (f.exists()) {
+        f.delete();
+      }
+    }
+  }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java

@@ -206,10 +206,10 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
             + fc.size());
       }
       fill.position(0);
-      int written = fc.write(fill, position);
+      IOUtils.writeFully(fc, fill, position);
       if(FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
-            " written " + written + " bytes " + " at offset " + position);
+            " written " + fill.capacity() + " bytes " + " at offset " + position);
       }
     }
   }
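Since writeFully either writes the entire buffer or throws, the debug log can report a fixed byte count instead of the return value of a single write: fill.position(0) is called just before the write, so (assuming fill's limit equals its capacity, as it does for a freshly allocated buffer) fill.capacity() is exactly the number of bytes written.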