
HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@723468 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze committed 16 years ago
commit 1b5a3f7297
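
For context, the behavior this change establishes can be sketched from the client side. The example below is a hedged illustration only: the path and byte counts are hypothetical, and it assumes an HDFS FileSystem with append support enabled (dfs.support.append=true).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendGetPosDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/append-getpos-demo");   // hypothetical path

        // Write 1024 bytes and close the file.
        FSDataOutputStream out = fs.create(file);
        out.write(new byte[1024]);
        out.close();

        // Re-open for append. Before this fix getPos() started at 0;
        // with the fix it starts at the existing file length (1024).
        FSDataOutputStream appendStream = fs.append(file);
        System.out.println("getPos() after append(): " + appendStream.getPos());
        appendStream.write(new byte[512]);
        System.out.println("getPos() after writing 512 bytes: " + appendStream.getPos());
        appendStream.close();

        fs.delete(file, true);
      }
    }

Before this fix, the stream returned by append() reported a position of 0, so getPos() disagreed with the actual file offset until the caller accounted for the pre-existing length.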

+ 3 - 0
CHANGES.txt

@@ -280,6 +280,9 @@ Release 0.19.1 - Unreleased
     HADOOP-4632. Fix TestJobHistoryVersion to use test.build.dir instead of the
     current working directory for scratch space. (Amar Kamat via cdouglas)
 
+    HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via
+    szetszwo)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

+ 9 - 2
src/core/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -30,9 +30,11 @@ public class FSDataOutputStream extends DataOutputStream implements Syncable {
     long position;
 
     public PositionCache(OutputStream out, 
-                         FileSystem.Statistics stats) throws IOException {
+                         FileSystem.Statistics stats,
+                         long pos) throws IOException {
       super(out);
       statistics = stats;
+      position = pos;
     }
 
     public void write(int b) throws IOException {
@@ -67,7 +69,12 @@ public class FSDataOutputStream extends DataOutputStream implements Syncable {
 
   public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats)
     throws IOException {
-    super(new PositionCache(out, stats));
+    this(out, stats, 0);
+  }
+
+  public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats,
+                            long startPosition) throws IOException {
+    super(new PositionCache(out, stats, startPosition));
     wrappedStream = out;
   }
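
The core of the FSDataOutputStream change is that the position-counting wrapper is now seeded with a starting offset instead of always beginning at zero. A simplified, standalone sketch of that idea (this is not the actual PositionCache class, just the counting pattern it uses):

    import java.io.FilterOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    // Simplified position-tracking wrapper: starts counting from a given
    // offset so a stream opened for append reports the true file position.
    class CountingOutputStream extends FilterOutputStream {
      private long position;

      CountingOutputStream(OutputStream out, long startPosition) {
        super(out);
        this.position = startPosition;
      }

      @Override
      public void write(int b) throws IOException {
        out.write(b);
        position++;
      }

      @Override
      public void write(byte[] b, int off, int len) throws IOException {
        out.write(b, off, len);
        position += len;
      }

      long getPos() {
        return position;
      }
    }

FSDataOutputStream delegates getPos() to this counter, so an append stream created with the new three-argument constructor reports absolute file offsets from the first write.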
   

+ 9 - 0
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -2024,6 +2024,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     private int recoveryErrorCount = 0; // number of times block recovery failed
     private int maxRecoveryErrorCount = 5; // try block recovery 5 times
     private volatile boolean appendChunk = false;   // appending to existing partial block
+    private long initialFileSize = 0; // at time of file open
 
     private void setLastException(IOException e) {
       if (lastException == null) {
@@ -2600,6 +2601,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
         LocatedBlock lastBlock, FileStatus stat,
         int bytesPerChecksum) throws IOException {
       this(src, stat.getBlockSize(), progress, bytesPerChecksum);
+      initialFileSize = stat.getLen(); // length of file when opened
 
       //
       // The last partial block of the file has to be filled.
@@ -3155,6 +3157,13 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     synchronized void setTestFilename(String newname) {
       src = newname;
     }
+
+    /**
+     * Returns the size of a file as it was when this stream was opened
+     */
+    long getInitialLen() {
+      return initialFileSize;
+    }
   }
 
   void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) {

+ 3 - 2
src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
 import org.apache.hadoop.util.*;
 
 
@@ -157,8 +158,8 @@ public class DistributedFileSystem extends FileSystem {
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
 
-    return new FSDataOutputStream(
-        dfs.append(getPathName(f), bufferSize, progress), statistics);
+    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
+    return new FSDataOutputStream(op, statistics, op.getInitialLen());
   }
 
   public FSDataOutputStream create(Path f, FsPermission permission,
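
With DFSOutputStream now exposing the length captured when the file was opened, append() can seed the output stream at that offset. The invariant this buys callers, sketched as a hypothetical helper (the class and method names are illustrative, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class AppendPositionCheck {
      // Hypothetical helper: verifies that an append stream's initial getPos()
      // equals the file length observed just before the append.
      static void checkAppendPosition(FileSystem fs, Path f) throws IOException {
        long existingLen = fs.getFileStatus(f).getLen();
        FSDataOutputStream out = fs.append(f);
        try {
          if (out.getPos() != existingLen) {
            throw new IOException("getPos() returned " + out.getPos()
                + " but the file length at open was " + existingLen);
          }
        } finally {
          out.close();
        }
      }
    }

The check is only illustrative; if another writer changes the file between getFileStatus() and append(), the two lengths can legitimately differ.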

+ 4 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -156,6 +156,10 @@ public class TestFileAppend2 extends TestCase {
   
         // write the remainder of the file
         stm = fs.append(file1);
+
+        // ensure getPos is set to reflect existing size of the file
+        assertTrue(stm.getPos() > 0);
+
         System.out.println("Writing " + (fileSize - mid2) + " bytes to file " + file1);
         stm.write(fileContents, mid2, fileSize - mid2);
         System.out.println("Written second part of file");
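
A stricter variant of the new assertion, not part of this commit and shown only to make the expected value explicit, would pin the position to the exact offset of the data already written rather than just requiring it to be positive:

    // Hypothetical tighter check: the append stream should start exactly at
    // the offset of the bytes written before the append (mid2 in this test).
    assertEquals(mid2, stm.getPos());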