
HADOOP-5224. HDFS append() is disabled. It throws
UnsupportedOperationException. Committed only to 0.19.x. (Raghu Angadi)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.19@744282 13f79535-47bb-0310-9956-ffa450edef68

Author: Raghu Angadi
Commit: 65cea94e1e

+ 3 - 0
CHANGES.txt

@@ -9,6 +9,9 @@ Release 0.19.1 - Unreleased
     HADOOP-5225. Workaround for tmp file handling in HDFS. sync() is 
     incomplete as a result. committed only to 0.19.x. (Raghu Angadi)
 
+    HADOOP-5224. HDFS append() is disabled. It throws 
+    UnsupportedOperationException. committed only to 0.19.x (Raghu Angadi) 
+
   IMPROVEMENTS
 
     HADOOP-4739. Fix spelling and grammar, improve phrasing of some sections in

+ 5 - 3
src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -175,9 +175,11 @@ public class DistributedFileSystem extends FileSystem {
   /** This optional operation is not yet supported. */
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-
-    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
-    return new FSDataOutputStream(op, statistics, op.getInitialLen());
+    // disable append() in 0.19.x
+    throw new UnsupportedOperationException("HDFS does not support append yet");
+        
+    //DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
+    //return new FSDataOutputStream(op, statistics, op.getInitialLen());
   }
 
   public FSDataOutputStream create(Path f, FsPermission permission,
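
Since append() now throws unconditionally on this branch, callers compiled
against 0.19 need to guard the call. A minimal sketch of defensive client-side
handling; the class name AppendClient, the path, and the payloads are
illustrative, not part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendClient {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/example.log");  // illustrative path

        try {
          // On 0.19.x DistributedFileSystem throws
          // UnsupportedOperationException here.
          FSDataOutputStream out = fs.append(p);
          out.write("more data".getBytes());
          out.close();
        } catch (UnsupportedOperationException e) {
          // Fall back to rewriting the whole file instead of appending.
          FSDataOutputStream out = fs.create(p, true /* overwrite */);
          out.write("full contents".getBytes());
          out.close();
        }
      }
    }

Note that UnsupportedOperationException is unchecked, so existing code that
only handles IOException will not catch it; the explicit catch above is the
point of the sketch.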

+ 4 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -123,6 +123,8 @@ public class TestFileAppend2 extends TestCase {
    * Verify that all data exists in file.
    */ 
   public void testSimpleAppend() throws IOException {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -369,6 +371,8 @@ public class TestFileAppend2 extends TestCase {
    * Test that appends to files at random offsets.
    */
   public void testComplexAppend() throws IOException {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     initBuffer(fileSize);
     Configuration conf = new Configuration();
     conf.setInt("heartbeat.recheck.interval", 2000);
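
The "if (true) return;" guard is the skip idiom for JUnit 3: these tests
extend junit.framework.TestCase, which has no @Ignore or skip mechanism. A
bare "return;" before live statements would be a compile error (the statements
become unreachable), but the Java language spec exempts "if (true)" from the
reachability rule, so the method compiles while skipping its body. A minimal
sketch; the class SkipGuardDemo is hypothetical:

    public class SkipGuardDemo {
      public void testSomething() {
        // A bare "return;" here would not compile: the println below
        // would be unreachable code (JLS 14.21). "if (true) return;"
        // is exempt from the reachability analysis.
        if (true) return;
        System.out.println("never runs while append() is disabled");
      }

      public static void main(String[] args) {
        new SkipGuardDemo().testSomething();  // prints nothing
      }
    }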

+ 12 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -67,6 +67,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC1: Append on block boundary. */
   public void testTC1() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
 
@@ -92,6 +94,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC2: Append on non-block boundary. */
   public void testTC2() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
 
@@ -117,6 +121,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC5: Only one simultaneous append. */
   public void testTC5() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
 
@@ -144,6 +150,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC7: Corrupted replicas are present. */
   public void testTC7() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
     System.out.println("p=" + p);
@@ -189,6 +197,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC11: Racing rename */
   public void testTC11() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
 
@@ -242,6 +252,8 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
   /** TC12: Append to partial CRC chunk */
   public void testTC12() throws Exception {
+    /* HDFS append() is temporarily disabled in 0.19 */
+    if (true) return;
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);
     

+ 8 - 3
src/test/org/apache/hadoop/hdfs/TestQuota.java

@@ -547,6 +547,7 @@ public class TestQuota extends TestCase {
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
       
+      /* NOTE: append() is not supported in 0.19.
       OutputStream out = dfs.append(file2);
       // appending 1 fileLen should succeed
       out.write(new byte[fileLen]);
@@ -578,6 +579,10 @@ public class TestQuota extends TestCase {
       // verify space after partial append
       c = dfs.getContentSummary(dstPath);
       assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
+      == end of append test == */
+      
+      // reduce quota for quotaDir1 to account for not appending 
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 3 * fileSpace);
       
       // Test set replication :
       
@@ -586,7 +591,7 @@ public class TestQuota extends TestCase {
       
       // verify that space is reduced by file2Len
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
       
      // now try to increase the replication and expect an error.
       hasException = false;
@@ -599,7 +604,7 @@ public class TestQuota extends TestCase {
 
       // verify space consumed remains unchanged.
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace - file2Len);
       
       // now increase the quota for quotaDir1 and quotaDir20
       dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
@@ -609,7 +614,7 @@ public class TestQuota extends TestCase {
       dfs.setReplication(file2, (short)(replication+1));
       // verify increase in space
       c = dfs.getContentSummary(dstPath);
-      assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace + file2Len);
       
     } finally {
       cluster.shutdown();
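
The three 5-to-3 changes in the assertions follow from removing the appends:
space consumed under dstPath is already 3 * fileSpace before the append
section, and the commented-out appends would have added another 2 * fileSpace,
giving the old expectation of 5 * fileSpace. With append() disabled,
consumption stays at the 3 * fileSpace baseline, and the new setQuota() call
shrinks the quota to match. A worked sketch of the accounting, with
illustrative values; the real test derives fileLen, file2Len, and replication
from its cluster setup:

    public class QuotaMath {
      public static void main(String[] args) {
        long fileLen = 1024, file2Len = 1024;    // illustrative sizes
        short replication = 3;
        long fileSpace = fileLen * replication;  // one fully replicated file

        long baseline = 3 * fileSpace;           // three files, no appends
        long oldExpectation = baseline + 2 * fileSpace;  // was 5 * fileSpace

        // Dropping file2's replication by one frees one replica of file2:
        long afterDecrease = baseline - file2Len;  // 3 * fileSpace - file2Len
        // Raising it one above the original adds one replica instead:
        long afterIncrease = baseline + file2Len;  // 3 * fileSpace + file2Len

        System.out.println(oldExpectation + " " + afterDecrease
            + " " + afterIncrease);
      }
    }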