
svn merge -c 1478517 from branch-1 for HDFS-4774. Backport HDFS-4525: Provide an API for knowing whether a file is closed.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2@1478518 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 12 years ago
commit 319c2bdad8
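The backported call lets a client ask the NameNode whether a file is fully closed, i.e. no longer under construction. Below is a minimal usage sketch, not part of this change: it assumes a FileSystem already bound to HDFS, and the helper name, polling loop, and timeout values are illustrative only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WaitForClose {
  /**
   * Poll the isFileClosed() API added by this commit until the file is
   * closed or the timeout expires. The timeout and backoff values here
   * are arbitrary examples, not part of the backport.
   */
  static boolean waitForFileClosed(DistributedFileSystem fs, Path path,
      long timeoutMillis) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (fs.isFileClosed(path)) {   // new API from this backport
        return true;
      }
      Thread.sleep(500);             // simple fixed backoff
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path p = new Path(args[0]);
      System.out.println("closed? " + waitForFileClosed(dfs, p, 60000L));
    }
  }
}

A typical sequence (the motivation behind HDFS-4525) is to trigger lease recovery on the file first and then poll isFileClosed() until it returns true.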

+ 3 - 0
CHANGES.txt

@@ -73,6 +73,9 @@ Release 1.2.0 - 2013.04.16
 
     HDFS-4776. Backport SecondaryNameNode web ui.  (szetszwo)
 
+    HDFS-4774. Backport HDFS-4525: Provide an API for knowing whether a file is
+    closed.  (Ted Yu via szetszwo)
+
   IMPROVEMENTS
 
     HADOOP-9434. Backport HADOOP-9267: hadoop -h|-{0,2}help should print usage.

+ 14 - 0
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -891,6 +891,20 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     }
   }
   
+  /**
+   * Get the close status of a file.
+   * @return true if the file is already closed
+   */
+  public boolean isFileClosed(String src) throws IOException {
+    checkOpen();
+    try {
+      return namenode.isFileClosed(src);
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     FileNotFoundException.class);
+    }
+  }
+
   /**
    * Append to an existing HDFS file.  
    * 

+ 12 - 0
src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -166,6 +166,18 @@ public class DistributedFileSystem extends FileSystem {
           dfs.open(getPathName(f), bufferSize, verifyChecksum, statistics));
   }
 
+  /**
+   * Get the close status of a file
+   * @param src The path to the file
+   *
+   * @return true if the file is closed
+   * @throws FileNotFoundException if the file does not exist.
+   * @throws IOException If an I/O error occurred
+   */
+  public boolean isFileClosed(Path src) throws IOException {
+    return dfs.isFileClosed(getPathName(src));
+  }
+
   /** 
    * Start the lease recovery of a file
    *

+ 14 - 1
src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -54,7 +54,8 @@ public interface ClientProtocol extends VersionedProtocol {
    *     multiple blocks within a single BlockTokenIdentifier 
    *     
    *     (bumped to 61 to bring in line with trunk)
-   * Added concat() - since this an addition of method, it does not break
+   * Added concat() and isFileClosed()
+   * - since this is an addition of methods, it does not break
    * compatibility and version number does not need to be changed.
    */
   public static final long versionID = 61L;
@@ -161,6 +162,18 @@ public interface ClientProtocol extends VersionedProtocol {
    */
   public boolean recoverLease(String src, String clientName) throws IOException;
 
+  /**
+   * Get the close status of a file
+   * @param src The string representation of the path to the file
+   *
+   * @return true if the file is closed
+   * @throws AccessControlException permission denied
+   * @throws FileNotFoundException file <code>src</code> is not found
+   * @throws IOException If an I/O error occurred
+   */
+  public boolean isFileClosed(String src) throws AccessControlException,
+      FileNotFoundException, IOException;
+
   /**
    * Set replication for an existing file.
    * <p>

+ 19 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1653,6 +1653,25 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
     }
   }
 
+  /**
+   * Returns true if the file is closed
+   */
+  boolean isFileClosed(String src)
+      throws AccessControlException, IOException {
+    FSPermissionChecker pc = getPermissionChecker();
+    synchronized (this) {
+      if (isPermissionEnabled) {
+        checkTraverse(pc, src);
+      }
+      INode inode = dir.getFileINode(src);
+      if (inode == null) {
+        throw new FileNotFoundException("File not found " + src);
+      }
+
+      return !inode.isUnderConstruction();
+    }
+  }
+  
   /**
    * Recover lease;
    * Immediately revoke the lease of the current lease holder and start lease

+ 5 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -727,6 +727,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     return info;
   }
 
+  /** {@inheritDoc} */
+  public boolean isFileClosed(String src) throws IOException {
+    return namesystem.isFileClosed(src);
+  }
+
   /** {@inheritDoc} */
   public boolean recoverLease(String src, String clientName) throws IOException {
     String clientMachine = getClientMachine();

+ 4 - 0
src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -225,6 +225,10 @@ public class TestDFSClientRetries extends TestCase {
       return versionID;
     }
 
+    public boolean isFileClosed(String src) throws IOException {
+      return true;
+    }
+
     public LocatedBlock addBlock(String src, String clientName)
     throws IOException
     {

+ 22 - 0
src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -472,4 +472,26 @@ public class TestDistributedFileSystem {
     testDFSClient();
     testFileChecksum();
   }
+
+  @Test(timeout=60000)
+  public void testFileCloseStatus() throws IOException {
+    Configuration conf = getTestConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    try {
+      // create a new file.
+      Path file = new Path("/simpleFlush.dat");
+      FSDataOutputStream output = fs.create(file);
+      // write to file
+      output.writeBytes("Some test data");
+      output.flush();
+      assertFalse("File status should be open", fs.isFileClosed(file));
+      output.close();
+      assertTrue("File status should be closed", fs.isFileClosed(file));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }