Browse Source

HADOOP-13017. Implementations of InputStream.read(buffer, offset, bytes) to return 0 if bytes==0. Contributed by Steve Loughran.

(cherry picked from commit 0bdd263d82a4510f16df49238d57c9f78ac28ae7)
Masatake Iwasaki 8 years ago
parent
commit
6d9b6fac86

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -968,6 +968,9 @@ public class HarFileSystem extends FileSystem {
       @Override
       public synchronized int read(byte[] b, int offset, int len) 
         throws IOException {
+        if (len == 0) {
+          return 0;
+        }
         int newlen = len;
         int ret = -1;
         if (position + len > end) {

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java

@@ -246,6 +246,9 @@ public class SaslInputStream extends InputStream implements ReadableByteChannel
    */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    }
     if (!useWrap) {
       return inStream.read(b, off, len);
     }
@@ -378,4 +381,4 @@ public class SaslInputStream extends InputStream implements ReadableByteChannel
     }
     return bytesRead;
   }
-}
+}

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

@@ -569,6 +569,9 @@ public class SaslRpcClient {
 
     @Override
     public synchronized int read(byte[] buf, int off, int len) throws IOException {
+      if (len == 0) {
+        return 0;
+      }
       // fill the buffer with the next RPC message
       if (unwrappedRpcBuffer.remaining() == 0) {
         readNextRpcPacket();

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java

@@ -74,6 +74,9 @@ public final class LimitInputStream extends FilterInputStream {
 
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    }
     if (left == 0) {
       return -1;
     }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1795,6 +1795,9 @@ public class WebHdfsFileSystem extends FileSystem
       if (runnerState == RunnerState.CLOSED) {
         throw new IOException("Stream closed");
       }
+      if (len == 0) {
+        return 0;
+      }
 
       // Before the first read, pos and fileLength will be 0 and readBuffer
       // will all be null. They will be initialized once the first connection

+ 3 - 0
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java

@@ -84,6 +84,9 @@ public class ThrottledInputStream extends InputStream {
   /** {@inheritDoc} */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    }
     throttle();
     int readLen = rawStream.read(b, off, len);
     if (readLen != -1) {

+ 3 - 0
hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java

@@ -187,6 +187,9 @@ public class HttpInputStreamWithRelease extends InputStream {
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
     SwiftUtils.validateReadArgs(b, off, len);
+    if (len == 0) {
+      return 0;
+    }
     //if the stream is already closed, then report an exception.
     assumeNotReleased();
     //now read in a buffer, reacting differently to different operations

+ 3 - 0
hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeInputStream.java

@@ -161,6 +161,9 @@ class SwiftNativeInputStream extends FSInputStream {
   public synchronized int read(byte[] b, int off, int len) throws IOException {
     SwiftUtils.debug(LOG, "read(buffer, %d, %d)", off, len);
     SwiftUtils.validateReadArgs(b, off, len);
+    if (len == 0) {
+      return 0;
+    }
     int result = -1;
     try {
       verifyOpen();