HDFS-14581. Appending to EC files crashes NameNode. Contributed by Surendra Singh Lilhore.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Surendra Singh Lilhore · 5 years ago · commit 5962a518bd
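
For context, here is a hedged client-side sketch of the usage this change enforces: appending to a striped (erasure-coded) file must pass CreateFlag.NEW_BLOCK alongside CreateFlag.APPEND, since the last striped block group cannot be reopened for write. The class name, path, and payload below are illustrative, not part of the commit.

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcAppendSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster and the target
        // directory has an erasure coding policy set.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path file = new Path("/ec/dir/file"); // hypothetical EC file

        // APPEND alone now fails fast with UnsupportedOperationException;
        // APPEND + NEW_BLOCK starts a fresh block group and is supported.
        try (FSDataOutputStream out = dfs.append(file,
            EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
          out.write("appended data".getBytes());
        }
      }
    }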

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java

@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -107,6 +108,12 @@ final class FSDirAppendOp {
       }
       final INodeFile file = INodeFile.valueOf(inode, path, true);
 
+      if (file.isStriped() && !newBlock) {
+        throw new UnsupportedOperationException(
+            "Append on EC file without new block is not supported. Use "
+                + CreateFlag.NEW_BLOCK + " create flag while appending file.");
+      }
+
       BlockManager blockManager = fsd.getBlockManager();
       final BlockStoragePolicy lpPolicy = blockManager
           .getStoragePolicy("LAZY_PERSIST");
@@ -186,10 +193,6 @@ final class FSDirAppendOp {
 
     LocatedBlock ret = null;
     if (!newBlock) {
-      if (file.isStriped()) {
-        throw new UnsupportedOperationException(
-            "Append on EC file without new block is not supported.");
-      }
       FSDirectory fsd = fsn.getFSDirectory();
       ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0);
       if (ret != null && delta != null) {
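
Why the check moves: previously the striped-file test lived further down the append path, in the block-conversion step shown above, after the file had already been reopened for write, so a rejected append left the file open on the NameNode, which is what crashed it. Hoisting the check to the top of FSDirAppendOp.appendFile rejects the request before any state changes; the new test below confirms via listOpenFiles() that no file stays open after the failure.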

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java

@@ -22,18 +22,26 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.List;
 import java.util.Random;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Tests append on erasure coded file.
@@ -111,4 +119,31 @@ public class TestStripedFileAppend {
         StripedFileTestUtil.getDefaultECPolicy(), totalSplit);
   }
 
+  @Test
+  public void testAppendWithoutNewBlock() throws IOException {
+    Path file = new Path(dir, "testAppendWithoutNewBlock");
+
+    // Create file
+    FSDataOutputStream out = dfs.create(file);
+    out.write("testAppendWithoutNewBlock".getBytes());
+    out.close();
+
+    // Append file
+    try {
+      out = dfs.append(file, EnumSet.of(CreateFlag.APPEND), 4096, null);
+      out.write("testAppendWithoutNewBlock".getBytes());
+      fail("Should throw unsupported operation");
+    } catch (Exception e) {
+      assertTrue(e.getMessage()
+          .contains("Append on EC file without new block is not supported"));
+    }
+
+    List<OpenFilesType> types = new ArrayList<>();
+    types.add(OpenFilesType.ALL_OPEN_FILES);
+
+    RemoteIterator<OpenFileEntry> listOpenFiles = dfs
+        .listOpenFiles(EnumSet.copyOf(types), file.toString());
+    assertFalse("No file should be open after append failure",
+        listOpenFiles.hasNext());
+  }
 }