
HDFS-13642. Creating a file with block size smaller than EC policy's cell size should fail.

(cherry picked from commit cf4108313da83e28d07676078a33016ec8856ff6)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
Xiao Chen, 7 years ago
parent
commit a5690b29a7
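
A short, hedged sketch of the client-side effect of this change. The path, policy name, and sizes below are illustrative, not part of this commit; it assumes an initialized DistributedFileSystem and that the RS-6-3-1024k policy (1 MiB cell size) is enabled on the cluster.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class SmallBlockSizeExample {
      // Attempts to create an EC file whose block size is smaller than the
      // policy's cell size; after HDFS-13642 the NameNode rejects this.
      static void create(DistributedFileSystem fs) throws IOException {
        try (FSDataOutputStream out = fs
            .createFile(new Path("/ec/smallBlock"))
            .ecPolicyName("RS-6-3-1024k")   // cell size: 1 MiB
            .blockSize(512 * 1024)          // 512 KiB < cell size
            .build()) {
          out.write("data".getBytes());
        } catch (IOException expected) {
          // Message contains "is less than the cell size ... of the
          // erasure coding policy", as asserted by the new test below.
        }
      }
    }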

+ 18 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
@@ -354,16 +355,28 @@ final class FSDirErasureCodingOp {
   }
 
   /**
-   * Check if the file or directory has an erasure coding policy.
+   * Get the erasure coding policy information for specified path and policy
+   * name. If ec policy name is given, it will be parsed and the corresponding
+   * policy will be returned. Otherwise, get the policy from the parents of the
+   * iip.
    *
    * @param fsn namespace
+   * @param ecPolicyName the ec policy name
    * @param iip inodes in the path containing the file
-   * @return Whether the file or directory has an erasure coding policy.
+   * @return {@link ErasureCodingPolicy}, or null if no policy is found
    * @throws IOException
    */
-  static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
-      final INodesInPath iip) throws IOException {
-    return unprotectedGetErasureCodingPolicy(fsn, iip) != null;
+  static ErasureCodingPolicy getErasureCodingPolicy(FSNamesystem fsn,
+      String ecPolicyName, INodesInPath iip) throws IOException {
+    ErasureCodingPolicy ecPolicy;
+    if (!StringUtils.isEmpty(ecPolicyName)) {
+      ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
+          fsn, ecPolicyName);
+    } else {
+      ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
+          fsn, iip);
+    }
+    return ecPolicy;
   }
 
   /**

+ 2 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.AddBlockFlag;
@@ -551,13 +550,8 @@ class FSDirWriteFileOp {
       boolean isStriped = false;
       ErasureCodingPolicy ecPolicy = null;
       if (!shouldReplicate) {
-        if (!StringUtils.isEmpty(ecPolicyName)) {
-          ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
-              fsd.getFSNamesystem(), ecPolicyName);
-        } else {
-          ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
-              fsd.getFSNamesystem(), existing);
-        }
+        ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
+            fsd.getFSNamesystem(), ecPolicyName, existing);
         if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
           isStriped = true;
         }

+ 16 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2431,11 +2431,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       iip = FSDirWriteFileOp.resolvePathForStartFile(
           dir, pc, src, flag, createParent);
 
-      if (shouldReplicate ||
-          (org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName) &&
-          !FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip))) {
-        blockManager.verifyReplication(src, replication, clientMachine);
-      }
 
       if (blockSize < minBlockSize) {
         throw new IOException("Specified block size is less than configured" +
@@ -2443,6 +2438,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             + "): " + blockSize + " < " + minBlockSize);
       }
 
+      if (shouldReplicate) {
+        blockManager.verifyReplication(src, replication, clientMachine);
+      } else {
+        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
+            .getErasureCodingPolicy(this, ecPolicyName, iip);
+        if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
+          if (blockSize < ecPolicy.getCellSize()) {
+            throw new IOException("Specified block size (" + blockSize
+                + ") is less than the cell size (" + ecPolicy.getCellSize()
+                +") of the erasure coding policy (" + ecPolicy + ").");
+          }
+        } else {
+          blockManager.verifyReplication(src, replication, clientMachine);
+        }
+      }
+
       FileEncryptionInfo feInfo = null;
       if (!iip.isRaw() && provider != null) {
         EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(

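Callers that want to stay clear of this new check can look up the effective policy's cell size before choosing a block size. A minimal sketch under the assumption that the target directory may have an EC policy set; the class, method, and variable names are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    class BlockSizeHelper {
      // Returns a block size that satisfies the new NameNode check by never
      // going below the cell size of the directory's erasure coding policy.
      static long safeBlockSize(DistributedFileSystem fs, Path dir,
          long requested) throws IOException {
        ErasureCodingPolicy policy = fs.getErasureCodingPolicy(dir);
        if (policy == null || policy.isReplicationPolicy()) {
          return requested;   // replicated file: no cell-size constraint
        }
        return Math.max(requested, policy.getCellSize());
      }
    }
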
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1558,8 +1558,9 @@ public class DFSTestUtil {
       out.write("replicated".getBytes());
     }
 
-    try (FSDataOutputStream out = filesystem.createFile(
-        new Path(ecDir, "RS-3-2")).ecPolicyName(ecPolicyRS32.getName()).build()) {
+    try (FSDataOutputStream out = filesystem
+        .createFile(new Path(ecDir, "RS-3-2"))
+        .ecPolicyName(ecPolicyRS32.getName()).blockSize(1024 * 1024).build()) {
       out.write("RS-3-2".getBytes());
     }
   }

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
@@ -231,4 +232,19 @@ public class TestDFSStripedOutputStream {
     StripedFileTestUtil.checkData(fs, testPath, writeBytes,
         new ArrayList<DatanodeInfo>(), null, blockSize * dataBlocks);
   }
+
+  @Test
+  public void testFileBlockSizeSmallerThanCellSize() throws Exception {
+    final Path path = new Path("testFileBlockSizeSmallerThanCellSize");
+    final byte[] bytes = StripedFileTestUtil.generateBytes(cellSize * 2);
+    try {
+      DFSTestUtil.writeFile(fs, path, bytes, cellSize / 2);
+      fail("Creating a file with block size smaller than "
+          + "ec policy's cell size should fail");
+    } catch (IOException expected) {
+      LOG.info("Caught expected exception", expected);
+      GenericTestUtils
+          .assertExceptionContains("less than the cell size", expected);
+    }
+  }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java

@@ -71,7 +71,7 @@ public class TestErasureCodingExerciseAPIs {
   private DistributedFileSystem fs;
   private HdfsAdmin dfsAdmin;
   private FileSystemTestWrapper fsWrapper;
-  private static final int BLOCK_SIZE = 1 << 14; // 16k
+  private static final int BLOCK_SIZE = 1 << 20; // 1MB
   private ErasureCodingPolicy ecPolicy;
 
   private static ErasureCodingPolicy getEcPolicy() {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java

@@ -64,7 +64,7 @@ public class TestErasureCodingPolicies {
   private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
-  private static final int BLOCK_SIZE = 16 * 1024;
+  private static final int BLOCK_SIZE = 1024 * 1024;
   private ErasureCodingPolicy ecPolicy;
   private FSNamesystem namesystem;
 

Binary
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -1510,7 +1510,7 @@
       <REPLICATION>1</REPLICATION>
       <MTIME>1512607204120</MTIME>
       <ATIME>1512607204120</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
+      <BLOCKSIZE>1048576</BLOCKSIZE>
       <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>