
HADOOP-1443. Fix a bug opening zero-length files in HDFS. Contributed by Konstantin.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@544181 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
Commit 134cea5b06

+ 3 - 0
CHANGES.txt

@@ -519,6 +519,9 @@ Branch 0.13 (unreleased changes)
      to a long, permitting map outputs to exceed 2^31 bytes.
      (omalley via cutting)
 
+133. HADOOP-1443.  Fix a bug opening zero-length files in HDFS.
+     (Konstantin Shvachko via cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

+ 1 - 1
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -961,7 +961,7 @@ class DFSClient implements FSConstants {
         throw new IOException("Stream closed");
       }
       long filelen = getFileLength();
-      if ((position < 0) || (position > filelen)) {
+      if ((position < 0) || (position >= filelen)) {
         return -1;
       }
       int realLen = length;
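
A hedged, caller-level sketch of what this one-character change means: a positional read that starts at or beyond the end of the file, which for a zero-length file is any position including 0, should now be reported as end-of-file with a return value of -1 instead of falling through to a block lookup. The helper below is illustrative only; the class name, the FileSystem handle, and the path are assumptions, and it presumes the FSDataInputStream wrapper passes the positional read through to the underlying DFS stream.

// Illustrative only -- not part of the patch. Shows the caller-visible effect
// of the tightened bound in the DFS client's positional read.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class EmptyFilePreadSketch {
  // For a zero-length file this should return -1 (end of file) once the fix is in place.
  static int preadFirstByte(FileSystem fs, Path path) throws IOException {
    FSDataInputStream in = fs.open(path);
    try {
      byte[] buf = new byte[1];
      return in.read(0L, buf, 0, 1);   // positional read at offset 0
    } finally {
      in.close();
    }
  }
}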

+ 17 - 3
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -432,7 +432,14 @@ class FSNamesystem implements FSConstants {
   synchronized LocatedBlocks  getBlockLocations(String clientMachine,
                                                 String src, 
                                                 long offset, 
-                                                long length) {
+                                                long length
+                                                ) throws IOException {
+    if (offset < 0) {
+      throw new IOException("Negative offset is not supported. File: " + src );
+    }
+    if (length < 0) {
+      throw new IOException("Negative length is not supported. File: " + src );
+    }
     return  getBlockLocations(clientMachine, 
                               dir.getFileINode(src), 
                               offset, length, Integer.MAX_VALUE);
@@ -442,7 +449,8 @@ class FSNamesystem implements FSConstants {
                                           FSDirectory.INode inode, 
                                           long offset, 
                                           long length,
-                                          int nrBlocksToReturn) {
+                                          int nrBlocksToReturn
+                                          ) throws IOException {
     if(inode == null || inode.isDir()) {
       return null;
     }
@@ -450,19 +458,25 @@ class FSNamesystem implements FSConstants {
     if (blocks == null) {
       return null;
     }
+    assert blocks.length > 0 : "Array of blocks is empty.";
     List<LocatedBlock> results;
     results = new ArrayList<LocatedBlock>(blocks.length);
 
     int curBlk = 0;
     long curPos = 0, blkSize = 0;
-    for (curBlk = 0; curBlk < blocks.length; curBlk++) {
+    int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
+    for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
       blkSize = blocks[curBlk].getNumBytes();
+      assert blkSize > 0 : "Block of size 0";
       if (curPos + blkSize > offset) {
         break;
       }
       curPos += blkSize;
     }
     
+    if (nrBlocks > 0 && curBlk == nrBlocks)   // offset >= end of file
+      return null;
+    
     long endOff = offset + length;
     
     DatanodeDescriptor client;
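
Taken together, the namenode-side changes reject negative offsets and lengths with an IOException, treat a file whose single block holds zero bytes (a zero-length file) as having no scannable blocks so the scanning loop is skipped, and return null when the requested offset lies at or past the end of a non-empty file. The sketch below restates that control flow as a stand-alone method with hypothetical names; it is not the actual FSNamesystem code, and the result type is only a placeholder for the located-blocks list.

// Stand-alone restatement of the fixed block scan (hypothetical names).
// blockSizes[i] is the size of the file's i-th block; an empty file is stored
// as a single zero-byte block.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class LocateBlocksSketch {
  static List<Long> locate(long[] blockSizes, long offset, long length) throws IOException {
    if (offset < 0)
      throw new IOException("Negative offset is not supported.");
    if (length < 0)
      throw new IOException("Negative length is not supported.");
    assert blockSizes.length > 0 : "Array of blocks is empty.";
    // A zero-length file is treated as having no blocks at all.
    int nrBlocks = (blockSizes[0] == 0) ? 0 : blockSizes.length;
    int curBlk;
    long curPos = 0;
    for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
      if (curPos + blockSizes[curBlk] > offset)
        break;                                   // first block overlapping the offset
      curPos += blockSizes[curBlk];
    }
    if (nrBlocks > 0 && curBlk == nrBlocks)      // offset >= end of a non-empty file
      return null;
    List<Long> results = new ArrayList<Long>(blockSizes.length);
    // ... blocks covering [offset, offset + length) would be collected here
    //     for a non-empty file; for a zero-length file the list stays empty ...
    return results;
  }
}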

+ 29 - 0
src/test/org/apache/hadoop/dfs/TestPread.java

@@ -38,7 +38,25 @@ public class TestPread extends TestCase {
     // create and write a file that contains three blocks of data
     DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
                                           (long)blockSize);
+    // test empty file open and read
+    stm.close();
+    FSDataInputStream in = fileSys.open(name);
     byte[] buffer = new byte[(int)(12*blockSize)];
+    in.readFully(0, buffer, 0, 0);
+    IOException res = null;
+    try { // read beyond the end of the file
+      in.readFully(0, buffer, 0, 1);
+    } catch (IOException e) {
+      // should throw an exception
+      res = e;
+    }
+    assertTrue("Error reading beyond file boundary.", res != null);
+    in.close();
+    if (!fileSys.delete(name))
+      assertTrue("Cannot delete file", false);
+    
+    // now create the real file
+    stm = fileSys.create(name, true, 4096, (short)1, (long)blockSize);
     Random rand = new Random(seed);
     rand.nextBytes(buffer);
     stm.write(buffer);
@@ -112,6 +130,17 @@ public class TestPread extends TestCase {
     actual = new byte[8*4096];
     stm.readFully(3*blockSize, actual, 0, 8*4096);
     checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
+    // read the tail
+    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
+    IOException res = null;
+    try { // read beyond the end of the file
+      stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
+    } catch (IOException e) {
+      // should throw an exception
+      res = e;
+    }
+    assertTrue("Error reading beyond file boundary.", res != null);
+    
     stm.close();
   }
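
The test now begins by exercising an empty file: the file is created and immediately closed, a zero-byte readFully at offset 0 must succeed, a one-byte pread must fail with an IOException, and the empty file is deleted before the original test file is written; a tail read and a deliberate read past the end of the full file are also added. As a quick way to see the same behavior without a DFS cluster, the fragment below drives the identical calls against the local file system; the scratch path, the class name, and the choice of the local file system are assumptions for illustration, not part of the patch, and behavior there is expected to mirror what the test asserts for DFS.

// Stand-alone sketch: the same empty-file pread checks, run against the local
// file system. Not part of the patch.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EmptyPreadCheck {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("/tmp/empty_pread_check");   // hypothetical scratch path
    fs.create(p).close();                          // zero-length file
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[1];
    in.readFully(0, buf, 0, 0);                    // zero-byte pread: must succeed
    boolean pastEofThrew = false;
    try {
      in.readFully(0, buf, 0, 1);                  // pread beyond EOF: expected to throw
    } catch (IOException e) {
      pastEofThrew = true;
    }
    in.close();
    fs.delete(p);
    System.out.println("pread past EOF threw: " + pastEofThrew);
  }
}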