HADOOP-11510. Expose truncate API via FileContext. (yliu)

yliu 10 years ago
parent commit 1b56d1ce32

+ 2 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -411,6 +411,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins testing
     job. (Yongjun Zhang and Todd Lipcon via ozawa)
 
+    HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
   IMPROVEMENTS
 
     HADOOP-11483. HardLink.java should use the jdk7 createLink method (aajisaka)

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -637,6 +637,15 @@ public abstract class AbstractFileSystem {
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
+  /**
+   * The specification of this method matches that of
+   * {@link FileContext#truncate(Path, long)} except that Path f must be for
+   * this file system.
+   */
+  public abstract boolean truncate(Path f, long newLength)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException;
+
   /**
    * The specification of this method matches that of
    * {@link FileContext#setReplication(Path, short)} except that Path f must be

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
 
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    throw new IOException("Not supported");
+  }
+
   /**
    * Opens an FSDataInputStream at the indicated Path.
    * @param f the file name to open

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -169,6 +169,12 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
     return fsImpl.open(f, bufferSize);
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    checkPath(f);
+    return fsImpl.truncate(f, newLength);
+  }
+
   @Override
   @SuppressWarnings("deprecation") // call to rename
   public void renameInternal(Path src, Path dst) throws IOException {

+ 43 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -818,6 +818,49 @@ public class FileContext {
     }.resolve(this, absF);
   }
 
+  /**
+   * Truncate the file in the indicated path to the indicated size.
+   * <ul>
+   * <li>Fails if path is a directory.
+   * <li>Fails if path does not exist.
+   * <li>Fails if path is not closed.
+   * <li>Fails if new size is greater than current size.
+   * </ul>
+   * @param f The path to the file to be truncated
+   * @param newLength The size the file is to be truncated to
+   *
+   * @return <code>true</code> if the file has been truncated to the desired
+   * <code>newLength</code> and is immediately available to be reused for
+   * write operations such as <code>append</code>, or
+   * <code>false</code> if a background process of adjusting the length of
+   * the last block has been started, and clients should wait for it to
+   * complete before proceeding with further file updates.
+   *
+   * @throws AccessControlException If access is denied
+   * @throws FileNotFoundException If file <code>f</code> does not exist
+   * @throws UnsupportedFileSystemException If file system for <code>f</code> is
+   *           not supported
+   * @throws IOException If an I/O error occurred
+   *
+   * Exceptions applicable to file systems accessed over RPC:
+   * @throws RpcClientException If an exception occurred in the RPC client
+   * @throws RpcServerException If an exception occurred in the RPC server
+   * @throws UnexpectedServerException If server implementation throws
+   *           undeclared exception to RPC server
+   */
+  public boolean truncate(final Path f, final long newLength)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    final Path absF = fixRelativePart(f);
+    return new FSLinkResolver<Boolean>() {
+      @Override
+      public Boolean next(final AbstractFileSystem fs, final Path p)
+          throws IOException, UnresolvedLinkException {
+        return fs.truncate(p, newLength);
+      }
+    }.resolve(this, absF);
+  }
+
   /**
    * Set replication for an existing file.
    * 
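
For reference, a minimal usage sketch of the new API. The file path and target length below are illustrative assumptions, not part of this patch; only FileContext.getFileContext(), Path, and the truncate(Path, long) method added here are real.

  // Hypothetical caller of the new FileContext#truncate API.
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class TruncateExample {
    public static void main(String[] args) throws Exception {
      FileContext fc = FileContext.getFileContext();
      Path file = new Path("/tmp/data.log");  // hypothetical path
      long newLength = 1024;                  // keep only the first 1 KB

      // true: file is already at newLength and ready for append;
      // false: a background process is adjusting the last block, so
      //        wait for it to finish before further file updates.
      boolean isReady = fc.truncate(file, newLength);
      if (!isReady) {
        System.out.println("Truncate in progress; wait before appending.");
      }
    }
  }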

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -212,6 +212,14 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.open(f, bufferSize);
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) 
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    checkPath(f);
+    return myFs.truncate(f, newLength);
+  }
+
   @Override
   public void renameInternal(Path src, Path dst) 
     throws IOException, UnresolvedLinkException {

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -247,6 +247,12 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.open(fullPath(f), bufferSize);
   }
 
+  @Override
+  public boolean truncate(final Path f, final long newLength)
+      throws IOException, UnresolvedLinkException {
+    return myFs.truncate(fullPath(f), newLength);
+  }
+
   @Override
   public void renameInternal(final Path src, final Path dst)
     throws IOException, UnresolvedLinkException {

+ 16 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -452,7 +452,15 @@ public class ViewFs extends AbstractFileSystem {
     return res.targetFileSystem.open(res.remainingPath, bufferSize);
   }
 
-  
+  @Override
+  public boolean truncate(final Path f, final long newLength)
+      throws AccessControlException, FileNotFoundException,
+      UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.truncate(res.remainingPath, newLength);
+  }
+
   @Override
   public void renameInternal(final Path src, final Path dst,
       final boolean overwrite) throws IOException, UnresolvedLinkException {
@@ -877,6 +885,13 @@ public class ViewFs extends AbstractFileSystem {
       throw new FileNotFoundException("Path points to dir not a file");
     }
 
+    @Override
+    public boolean truncate(final Path f, final long newLength)
+        throws FileNotFoundException, IOException {
+      checkPathIsSlash(f);
+      throw readOnlyMountTable("truncate", f);
+    }
+
     @Override
     public void renameInternal(final Path src, final Path dst)
         throws AccessControlException, IOException {

+ 6 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java

@@ -140,6 +140,12 @@ public class TestAfsCheckPath {
       return null;
     }
 
+    @Override
+    public boolean truncate(Path f, long newLength) throws IOException {
+      // deliberately empty
+      return false;
+    }
+
     @Override
     public void renameInternal(Path src, Path dst) throws IOException {
       // deliberately empty

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -317,6 +317,12 @@ public class Hdfs extends AbstractFileSystem {
     return dfs.createWrappedInputStream(dfsis);
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength)
+      throws IOException, UnresolvedLinkException {
+    return dfs.truncate(getUriPath(f), newLength);
+  }
+
   @Override
   public void renameInternal(Path src, Path dst) 
     throws IOException, UnresolvedLinkException {
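
On HDFS, a truncate that lands exactly on a block boundary typically completes immediately and returns true; truncating mid-block causes the NameNode to schedule recovery of the last block, and the call returns false. Below is a hedged sketch of one way a client might wait for that recovery, assuming a DistributedFileSystem handle is available; the helper name and polling interval are made up for illustration, while DistributedFileSystem#isFileClosed is a real API.

  // Sketch: wait for truncate block recovery before reusing the file.
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public final class TruncateWait {
    static void truncateAndWait(FileContext fc, DistributedFileSystem dfs,
        Path file, long newLength) throws Exception {
      if (!fc.truncate(file, newLength)) {
        // Recovery was scheduled; the file stays open until it finishes.
        while (!dfs.isFileClosed(file)) {
          Thread.sleep(300);  // fixed polling interval, illustrative only
        }
      }
      // Safe to append or otherwise update the file from here.
    }
  }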

+ 31 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -28,6 +28,7 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -114,7 +115,36 @@ public class TestHDFSFileContextMainOperations extends
   private Path getTestRootPath(FileContext fc, String path) {
     return fileContextTestHelper.getTestRootPath(fc, path);
   }
-  
+
+  @Test
+  public void testTruncate() throws Exception {
+    final short repl = 3;
+    final int blockSize = 1024;
+    final int numOfBlocks = 2;
+    DistributedFileSystem fs = cluster.getFileSystem();
+    Path dir = getTestRootPath(fc, "test/hadoop");
+    Path file = getTestRootPath(fc, "test/hadoop/file");
+
+    final byte[] data = FileSystemTestHelper.getFileData(
+        numOfBlocks, blockSize);
+    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
+
+    final int newLength = blockSize;
+
+    boolean isReady = fc.truncate(file, newLength);
+
+    Assert.assertTrue("Recovery is not expected.", isReady);
+
+    FileStatus fileStatus = fc.getFileStatus(file);
+    Assert.assertEquals(fileStatus.getLen(), newLength);
+    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
+
+    ContentSummary cs = fs.getContentSummary(dir);
+    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
+        newLength * repl);
+    Assert.assertTrue(fs.delete(dir, true));
+  }
+
   @Test
   public void testOldRenameWithQuota() throws Exception {
     DistributedFileSystem fs = cluster.getFileSystem();