Browse source code

Revert HADOOP-8491

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1349133 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 13 years ago
parent
commit
797c8548d0

+ 3 - 6
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -242,12 +242,6 @@ Branch-2 ( Unreleased changes )
 
     HADOOP-8485. Don't hardcode "Apache Hadoop 0.23" in the docs. (eli)
 
-    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
-    (Colin Patrick McCabe via eli)
-
-    HADOOP-8491. Check for short writes when using FileChannel#write
-    and related methods. (Colin Patrick McCabe via eli)
-
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -280,6 +274,9 @@ Branch-2 ( Unreleased changes )
     
     HADOOP-8405. ZKFC tests leak ZK instances. (todd)
 
+    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
+    (Colin Patrick McCabe via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

+ 0 - 34
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -20,9 +20,6 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import java.net.Socket;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
 
@@ -248,35 +245,4 @@ public class IOUtils {
     public void write(int b) throws IOException {
     }
   }  
-  
-  /**
-   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
-   * 
-   * @param bc               The WritableByteChannel to write to.
-   * @param buf              The input buffer
-   * @param offset           The offset in the file to start writing at.
-   * @throws IOException     On I/O error.
-   */
-  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
-      throws IOException {
-    do {
-      bc.write(buf);
-    } while (buf.remaining() > 0);
-  }
-
-  /**
-   * Write a ByteBuffer to a FileChannel at a given offset, 
-   * handling short writes.
-   * 
-   * @param fc               The FileChannel to write to.
-   * @param buf              The input buffer
-   * @param offset           The offset in the file to start writing at.
-   * @throws IOException     On I/O error.
-   */
-  public static void writeFully(FileChannel fc, ByteBuffer buf,
-      long offset) throws IOException {
-    do {
-      offset += fc.write(buf, offset);
-    } while (buf.remaining() > 0);
-  }
 }

+ 1 - 43
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java

@@ -21,13 +21,9 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -36,8 +32,7 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-  private static final String TEST_FILE_NAME = "test_file";
-  
+
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -115,41 +110,4 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
   
-  @Test
-  public void testWriteFully() throws IOException {
-    final int INPUT_BUFFER_LEN = 10000;
-    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
-    byte[] input = new byte[INPUT_BUFFER_LEN];
-    for (int i = 0; i < input.length; i++) {
-      input[i] = (byte)(i & 0xff);
-    }
-    byte[] output = new byte[input.length];
-    
-    try {
-      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
-      FileChannel fc = raf.getChannel();
-      ByteBuffer buf = ByteBuffer.wrap(input);
-      IOUtils.writeFully(fc, buf);
-      raf.seek(0);
-      raf.read(output);
-      for (int i = 0; i < input.length; i++) {
-        assertEquals(input[i], output[i]);
-      }
-      buf.rewind();
-      IOUtils.writeFully(fc, buf, HALFWAY);
-      for (int i = 0; i < HALFWAY; i++) {
-        assertEquals(input[i], output[i]);
-      }
-      raf.seek(0);
-      raf.read(output);
-      for (int i = HALFWAY; i < input.length; i++) {
-        assertEquals(input[i - HALFWAY], output[i]);
-      }
-    } finally {
-      File f = new File(TEST_FILE_NAME);
-      if (f.exists()) {
-        f.delete();
-      }
-    }
-  }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java

@@ -206,10 +206,10 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
             + fc.size());
       }
       fill.position(0);
-      IOUtils.writeFully(fc, fill, position);
+      int written = fc.write(fill, position);
       if(FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
-            " written " + fill.capacity() + " bytes " + " at offset " + position);
+            " written " + written + " bytes " + " at offset " + position);
       }
     }
   }