
HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.

(cherry picked from commit bee5a6a64a1c037308fa4d52249be39c82791590)
Jing Zhao 10 years ago
parent
commit
6dcc79507d
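
For context, a minimal sketch of the failure mode this patch guards against (hypothetical paths; assumes a DistributedFileSystem handle from a MiniDFSCluster-style setup, not code from this commit): concatenating a source whose preferred block size exceeds the target's leaves the target's last block longer than the target's preferred block size, and a subsequent append cannot handle that oversized last block.

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch only: `dfs` and `blockSize` (the cluster's default block size)
    // are assumed from a surrounding test setup.
    static void appendAfterOversizedConcat(DistributedFileSystem dfs,
        long blockSize) throws Exception {
      final Path target = new Path("/test/target");  // hypothetical path
      DFSTestUtil.createFile(dfs, target, blockSize, (short) 3, 0L);

      // Write the source with a preferred block size twice the target's, so
      // its single block is longer than the target's preferred block size.
      final Path src = new Path("/test/src");        // hypothetical path
      DFSTestUtil.createFile(dfs, src, 1024, 2 * blockSize, 2 * blockSize,
          (short) 3, 0L);

      // Before this patch, concat accepted the mismatch...
      dfs.concat(target, new Path[] {src});

      // ...and re-opening for append had to resume inside the oversized
      // last block, which the append path could not handle (HDFS-7943).
      try (FSDataOutputStream out = dfs.append(target)) {
        out.write(1);
      }
    }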

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -896,6 +896,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)
 
+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

+ 24 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java

@@ -34,6 +34,16 @@ import java.util.List;
 
 import static org.apache.hadoop.util.Time.now;
 
+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src file and the target file are in the same dir
+ * 2. none of the source files can be in a snapshot
+ * 3. no source file can be the same as the target file
+ * 4. source files cannot be under construction or empty
+ * 5. a source file's preferred block size cannot be greater than the
+ *    target file's
+ * </pre>
+ */
 class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ class FSDirConcatOp {
         throw new SnapshotException("Concat: the source file " + src
             + " is referred by some other reference in some snapshot.");
       }
+      // a source file cannot be the same as the target file
       if (srcINode == targetINode) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
+      // source file cannot be under construction or empty
       if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+      // a source file's preferred block size cannot be greater than the
+      // target file's
+      if (srcINodeFile.getPreferredBlockSize() >
+          targetINode.getPreferredBlockSize()) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+            + " which is greater than the target file's preferred block size "
+            + targetINode.getPreferredBlockSize());
+      }
       si.add(srcINodeFile);
     }
 
@@ -143,9 +164,10 @@ class FSDirConcatOp {
     return si.toArray(new INodeFile[si.size()]);
   }
 
-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
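
The hunk cuts off just as the per-source loop begins, but the visible variables (targetRepl, srcRepl, fileSize) indicate the arithmetic: once a source's blocks belong to the target file, its bytes are re-counted at the target's replication factor. A hedged sketch of that idea, not the exact remainder of computeQuotaDeltas:

    // Sketch of the replication-delta arithmetic suggested above. E.g. a
    // 100 MB source at replication 2 joining a target at replication 3 is
    // charged 100 MB * (3 - 2) = +100 MB of additional storage space.
    static long storageSpaceDelta(long fileSize, short srcRepl,
        short targetRepl) {
      return fileSize * (targetRepl - srcRepl);
    }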

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -388,6 +389,22 @@ public class TestHDFSConcat {
     } catch (Exception e) {
       // expected
     }
+
+    // the source file's preferred block size cannot be greater than the target
+    {
+      final Path src1 = new Path(parentDir, "src1");
+      DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
+      final Path src2 = new Path(parentDir, "src2");
+      // create a file whose preferred block size is greater than the target
+      DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
+          dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
+      try {
+        dfs.concat(trg, new Path[] {src1, src2});
+        fail("didn't fail for src with greater preferred block size");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("preferred block size", e);
+      }
+    }
   }
 
   /**
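
One detail worth noting in the new check: the comparison is strictly greater-than, so a source whose preferred block size equals the target's still passes. A minimal sketch of that boundary case (hypothetical path; reuses the `dfs`, `trg`, `parentDir`, `fileLen`, and REPL_FACTOR names from the test above):

    // Source with the *same* preferred block size as the target: accepted,
    // since the check only rejects sizes strictly greater than the target's.
    final Path srcSame = new Path(parentDir, "srcSame");  // hypothetical
    DFSTestUtil.createFile(dfs, srcSame, 1024, fileLen,
        dfs.getDefaultBlockSize(trg), REPL_FACTOR, 0L);
    dfs.concat(trg, new Path[] {srcSame});  // no exception expected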