
Revert HDFS-3492 from r1347192: patch broke TestShortCircuitLocalRead

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1347797 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 13 years ago
parent
commit
4db45a7f7a

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java

@@ -42,7 +42,10 @@ public class InputStreamEntity implements StreamingOutput {
 
   @Override
   public void write(OutputStream os) throws IOException {
-    IOUtils.skipFully(is, offset);
+    long skipped = is.skip(offset);
+    if (skipped < offset) {
+      throw new IOException("Requested offset beyond stream size");
+    }
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
     } else {
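
The restored code relies on a single skip() call and checks its return value. That check is load-bearing: InputStream.skip() reports how far the stream actually advanced, and a request that runs past the end of the data simply comes back short rather than failing. A minimal, self-contained illustration (the class name ShortSkipAtEof is invented for this example):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ShortSkipAtEof {
    public static void main(String[] args) throws IOException {
        // ByteArrayInputStream.skip() stops at the end of its buffer, so a
        // request past EOF returns a short count instead of throwing.
        InputStream is = new ByteArrayInputStream(new byte[10]);
        System.out.println(is.skip(100)); // prints 10, not 100
    }
}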

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -159,9 +159,6 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3505. DirectoryScanner does not join all threads in shutdown.
     (Colin Patrick McCabe via eli)
 
-    HDFS-3492. Fix some misuses of InputStream#skip (Colin Patrick McCabe
-    via todd)
-
     HDFS-3485. DataTransferThrottler will over-throttle when currentTimeMillis
     jumps (Andy Isaacson via todd)
 

+ 27 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -285,11 +284,24 @@ class BlockReaderLocal implements BlockReader {
     //Initially the buffers have nothing to read.
     dataBuff.flip();
     checksumBuff.flip();
-    IOUtils.skipFully(dataIn, firstChunkOffset);
+    long toSkip = firstChunkOffset;
+    while (toSkip > 0) {
+      long skipped = dataIn.skip(toSkip);
+      if (skipped == 0) {
+        throw new IOException("Couldn't initialize input stream");
+      }
+      toSkip -= skipped;
+    }
     if (checksumIn != null) {
       long checkSumOffset = (firstChunkOffset / bytesPerChecksum)
           * checksumSize;
-      IOUtils.skipFully(dataIn, checkSumOffset);
+      while (checkSumOffset > 0) {
+        long skipped = checksumIn.skip(checkSumOffset);
+        if (skipped == 0) {
+          throw new IOException("Couldn't initialize checksum input stream");
+        }
+        checkSumOffset -= skipped;
+      }
     }
   }
 
@@ -395,9 +407,17 @@ class BlockReaderLocal implements BlockReader {
     dataBuff.clear();
     checksumBuff.clear();
   
-    IOUtils.skipFully(dataIn, toskip);
-    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
-    IOUtils.skipFully(checksumIn, checkSumOffset);
+    long dataSkipped = dataIn.skip(toskip);
+    if (dataSkipped != toskip) {
+      throw new IOException("skip error in data input stream");
+    }
+    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
+    if (checkSumOffset > 0) {
+      long skipped = checksumIn.skip(checkSumOffset);
+      if (skipped != checkSumOffset) {
+        throw new IOException("skip error in checksum input stream");
+      }
+    }
 
     // read into the middle of the chunk
     if (skipBuf == null) {
@@ -450,4 +470,4 @@ class BlockReaderLocal implements BlockReader {
   public boolean hasSentStatusCode() {
     return false;
   }
-}
+}
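
Two things are visible in this hunk. First, the restored loops retry skip() until the requested bytes are consumed, throwing as soon as skip() returns 0 so the loop cannot spin forever. Second, the removed one-liner inside the checksum branch called IOUtils.skipFully(dataIn, checkSumOffset), advancing the data stream by the checksum offset instead of advancing checksumIn, which is consistent with the TestShortCircuitLocalRead breakage named in the commit message. For reference, a minimal sketch of the skip-until-done pattern that HDFS-3492 centralized; the read() probe is one common way to tell a stream that merely declined to skip from one at EOF, and this is an illustration, not the exact Hadoop IOUtils code:

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class SkipFullySketch {
    static void skipFully(InputStream in, long len) throws IOException {
        while (len > 0) {
            long skipped = in.skip(len);
            if (skipped > 0) {
                len -= skipped;
            } else if (in.read() >= 0) {
                len--; // skip() returned 0 but data remains; the probe consumed one byte
            } else {
                throw new EOFException("Premature EOF with " + len + " bytes left to skip");
            }
        }
    }
}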

+ 2 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -44,7 +44,6 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.ArrayWritable;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -2290,11 +2289,9 @@ public abstract class FSEditLogOp {
           // 0xff, we want to skip over that region, because there's nothing
           // interesting there.
           long numSkip = e.getNumAfterTerminator();
-          try {
-            IOUtils.skipFully(in, numSkip);
-          } catch (IOException t) {
+          if (in.skip(numSkip) < numSkip) {
             FSImage.LOG.error("Failed to skip " + numSkip + " bytes of " +
-              "garbage after an OP_INVALID.  Unexpected early EOF.", t);
+              "garbage after an OP_INVALID.  Unexpected early EOF.");
             return null;
           }
         } catch (IOException e) {
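
The restored error path treats any short skip as an unexpected early EOF. That is usually right at the tail of an edit log, but InputStream.skip() is allowed to return fewer bytes than requested long before EOF; BufferedInputStream, for example, may skip only what is already in its internal buffer. A small self-contained demonstration (the class name is invented):

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ShortSkipMidStream {
    public static void main(String[] args) throws IOException {
        // An 8-byte buffer over a 100-byte stream: after one read(), seven
        // buffered bytes remain, and skip() returns only those.
        InputStream in = new BufferedInputStream(
                new ByteArrayInputStream(new byte[100]), 8);
        in.read();                       // fills the internal buffer
        System.out.println(in.skip(50)); // prints 7; 92 bytes still remain
    }
}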

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
@@ -93,7 +92,8 @@ public class TestShortCircuitLocalRead {
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    IOUtils.skipFully(stm, readOffset);
+    long skipped = stm.skip(readOffset);
+    Assert.assertEquals(skipped, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
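
Incidentally, org.junit.Assert.assertEquals takes the expected value first and the actual value second, so the restored assertion has its arguments reversed; the test passes or fails identically either way, but a failure message would label the two values backwards. The conventional ordering would be:

    Assert.assertEquals(readOffset, skipped);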

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -687,7 +686,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
-    IOUtils.skipFully(result, seekOffset);
+    result.skip(seekOffset);
     return result;
   }
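
Here the restored line discards skip()'s return value entirely, so a short skip would leave the caller silently reading from the wrong offset; this is exactly the misuse class HDFS-3492 set out to eliminate. Since SimulatedFSDataset is an in-memory test double, a single skip() is presumably reliable in practice, but a hypothetical guard (helper name invented) would look like:

import java.io.IOException;
import java.io.InputStream;

final class SeekGuard {
    // Fail loudly when one skip() call does not reach the requested offset,
    // instead of proceeding from the wrong position.
    static void seekTo(InputStream in, long seekOffset) throws IOException {
        long skipped = in.skip(seekOffset);
        if (skipped != seekOffset) {
            throw new IOException("Asked to skip " + seekOffset
                    + " bytes but only advanced " + skipped);
        }
    }
}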