
HDFS-10996. Ability to specify per-file EC policy at create time. Contributed by SammiChen.

Andrew Wang 8 years ago
parent
commit
a7312715a6
23 files changed, 200 insertions and 74 deletions
  1. +21 -2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  2. +3 -2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  3. +27 -7
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  4. +5 -1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  5. +4 -1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  6. +1 -0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
  7. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  8. +35 -19
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  9. +12 -5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
  10. +7 -9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  11. +3 -3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  12. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  13. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  14. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
  15. +45 -0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
  16. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  17. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
  18. +9 -7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  19. +2 -2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  20. +2 -2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
  21. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
  22. +11 -6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
  23. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

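Before the per-file diffs, a minimal usage sketch of what this commit enables. This is illustrative only, not part of the change: it assumes a running HDFS cluster whose fs.defaultFS points at the NameNode and on which the named policies are listed in dfs.namenode.ec.policies.enabled, and it uses the HdfsDataOutputStreamBuilder API extended below.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class PerFileEcPolicyExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);

        // Directory-level policies work as before.
        Path dir = new Path("/striped");
        dfs.mkdirs(dir);
        dfs.setErasureCodingPolicy(dir, "RS-6-3-64k"); // assumed enabled

        // Without setEcPolicyName(), behavior is unchanged: the file
        // inherits the policy (replication or EC) of its nearest ancestor.
        try (FSDataOutputStream out =
            dfs.newFSDataOutputStreamBuilder(new Path(dir, "inherited"))
                .build()) {
          out.writeBytes("RS-6-3-64k, inherited from /striped\n");
        }

        // With an explicit name, the policy overrides the inherited one for
        // this file only, and the choice is persisted in the namespace.
        try (FSDataOutputStream out =
            dfs.newFSDataOutputStreamBuilder(new Path(dir, "explicit"))
                .setEcPolicyName("RS-3-2-64k") // assumed enabled
                .build()) {
          out.writeBytes("RS-3-2-64k, despite the directory policy\n");
        }
      }
    }
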
+ 21 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1190,13 +1190,31 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       long blockSize, Progressable progress, int buffersize,
       ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
       throws IOException {
+    return create(src, permission, flag, createParent, replication, blockSize,
+        progress, buffersize, checksumOpt, favoredNodes, null);
+  }
+
+
+  /**
+   * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
+   * Progressable, int, ChecksumOpt, InetSocketAddress[])} with the addition of
+   * ecPolicyName, which specifies an explicit erasure coding policy for
+   * this file instead of inheriting one from its parent directory. The
+   * policy is persisted in HDFS. A value of null means the file inherits
+   * its parent directory's policy.
+   */
+  public DFSOutputStream create(String src, FsPermission permission,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, Progressable progress, int buffersize,
+      ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
+      String ecPolicyName) throws IOException {
     checkOpen();
     final FsPermission masked = applyUMask(permission);
     LOG.debug("{}: masked={}", src, masked);
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
         dfsClientConf.createChecksum(checksumOpt),
-        getFavoredNodesStr(favoredNodes));
+        getFavoredNodesStr(favoredNodes), ecPolicyName);
     beginFileLease(result.getFileId(), result);
     return result;
   }
@@ -1249,7 +1267,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     if (result == null) {
       DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
       result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
-          flag, createParent, replication, blockSize, progress, checksum, null);
+          flag, createParent, replication, blockSize, progress, checksum,
+          null, null);
     }
     beginFileLease(result.getFileId(), result);
     return result;

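At a lower level, the same knob now exists on DFSClient itself. A hypothetical sketch of calling the new overload directly (most callers go through DistributedFileSystem; the argument values here are illustrative):

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSOutputStream;

    class DfsClientCreateSketch {
      static DFSOutputStream createStriped(DFSClient client)
          throws IOException {
        return client.create("/striped/file",
            FsPermission.getFileDefault(),
            EnumSet.of(CreateFlag.CREATE),
            true,                // createParent
            (short) 1,           // replication (not used for striped files)
            128 * 1024 * 1024L,  // blockSize
            null,                // progress
            4096,                // buffersize
            null,                // checksumOpt
            null,                // favoredNodes
            "RS-6-3-64k");       // ecPolicyName; null inherits from parent
      }
    }
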
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -255,7 +255,8 @@ public class DFSOutputStream extends FSOutputSummer
   static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
       FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
       short replication, long blockSize, Progressable progress,
-      DataChecksum checksum, String[] favoredNodes) throws IOException {
+      DataChecksum checksum, String[] favoredNodes, String ecPolicyName)
+      throws IOException {
     try (TraceScope ignored =
              dfsClient.newPathTraceScope("newStreamForCreate", src)) {
       HdfsFileStatus stat = null;
@@ -269,7 +270,7 @@ public class DFSOutputStream extends FSOutputSummer
         try {
           stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
               new EnumSetWritable<>(flag), createParent, replication,
-              blockSize, SUPPORTED_CRYPTO_VERSIONS);
+              blockSize, SUPPORTED_CRYPTO_VERSIONS, ecPolicyName);
           break;
         } catch (RemoteException re) {
           IOException e = re.unwrapRemoteException(

+ 27 - 7
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -100,6 +100,8 @@ import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
+import javax.annotation.Nonnull;
+
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
  * This object is the way end-user code interacts with a Hadoop
@@ -456,13 +458,18 @@ public class DistributedFileSystem extends FileSystem {
   * at the creation time only. With favored nodes, blocks will be pinned on
   * the datanodes to prevent the balancer from moving them, though HDFS may
   * still move blocks away from favored nodes during replication. A value
-   * of null means no favored nodes for this create
+   * of null means no favored nodes for this create.
+   * Another addition is ecPolicyName. A non-null ecPolicyName specifies an
+   * explicit erasure coding policy for this file, overriding the inherited
+   * policy. A null ecPolicyName means the file will inherit its EC policy from
+   * an ancestor (the default).
    */
   private HdfsDataOutputStream create(final Path f,
-      final FsPermission permission, EnumSet<CreateFlag> flag,
+      final FsPermission permission, final EnumSet<CreateFlag> flag,
       final int bufferSize, final short replication, final long blockSize,
       final Progressable progress, final ChecksumOpt checksumOpt,
-      final InetSocketAddress[] favoredNodes) throws IOException {
+      final InetSocketAddress[] favoredNodes, final String ecPolicyName)
+      throws IOException {
     statistics.incrementWriteOps(1);
     storageStatistics.incrementOpCounter(OpType.CREATE);
     Path absF = fixRelativePart(f);
@@ -471,7 +478,7 @@ public class DistributedFileSystem extends FileSystem {
       public HdfsDataOutputStream doCall(final Path p) throws IOException {
         final DFSOutputStream out = dfs.create(getPathName(f), permission,
             flag, true, replication, blockSize, progress, bufferSize,
-            checksumOpt, favoredNodes);
+            checksumOpt, favoredNodes, ecPolicyName);
         return dfs.createWrappedOutputStream(out, statistics);
       }
       @Override
@@ -480,7 +487,7 @@ public class DistributedFileSystem extends FileSystem {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem)fs;
           return myDfs.create(p, permission, flag, bufferSize, replication,
-              blockSize, progress, checksumOpt, favoredNodes);
+              blockSize, progress, checksumOpt, favoredNodes, ecPolicyName);
         }
         throw new UnsupportedOperationException("Cannot create with" +
             " favoredNodes through a symlink to a non-DistributedFileSystem: "
@@ -2645,6 +2652,7 @@ public class DistributedFileSystem extends FileSystem {
       extends FSDataOutputStreamBuilder {
     private final DistributedFileSystem dfs;
     private InetSocketAddress[] favoredNodes = null;
+    private String ecPolicyName = null;
 
     public HdfsDataOutputStreamBuilder(DistributedFileSystem dfs, Path path) {
       super(dfs, path);
@@ -2656,17 +2664,29 @@ public class DistributedFileSystem extends FileSystem {
     }
 
     public HdfsDataOutputStreamBuilder setFavoredNodes(
-        final InetSocketAddress[] nodes) {
+        @Nonnull final InetSocketAddress[] nodes) {
       Preconditions.checkNotNull(nodes);
       favoredNodes = nodes.clone();
       return this;
     }
 
+    protected String getEcPolicyName() {
+      return ecPolicyName;
+    }
+
+    public HdfsDataOutputStreamBuilder setEcPolicyName(
+        @Nonnull final String policyName) {
+      Preconditions.checkNotNull(policyName);
+      ecPolicyName = policyName;
+      return this;
+    }
+
     @Override
     public HdfsDataOutputStream build() throws IOException {
       return dfs.create(getPath(), getPermission(), getFlags(),
           getBufferSize(), getReplication(), getBlockSize(),
-          getProgress(), getChecksumOpt(), getFavoredNodes());
+          getProgress(), getChecksumOpt(), getFavoredNodes(),
+          getEcPolicyName());
     }
   }
 

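One detail of the builder contract above, continuing the earlier sketch's dfs and dir (hypothetical usage): setEcPolicyName rejects null via Preconditions.checkNotNull, so inheritance is expressed by omitting the call, not by passing null.

    // Override: the argument must be non-null and name an enabled policy.
    try (FSDataOutputStream out =
        dfs.newFSDataOutputStreamBuilder(new Path(dir, "pinned"))
            .setEcPolicyName("XOR-2-1-64k") // illustrative; assumed enabled
            .build()) {
      out.writeBytes("pinned to XOR-2-1-64k\n");
    }
    // .setEcPolicyName(null) would throw NullPointerException; to inherit
    // the directory policy, simply do not call the setter.
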
+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -160,6 +160,10 @@ public interface ClientProtocol {
    * @param replication block replication factor.
    * @param blockSize maximum block size.
    * @param supportedVersions CryptoProtocolVersions supported by the client
+   * @param ecPolicyName the name of the erasure coding policy. A null value
+   *                     means this file will inherit its parent directory's
+   *                     policy, whether that is replication or an erasure
+   *                     coding policy.
    *
    * @return the status of the created file, it could be null if the server
    *           doesn't support returning the file status
@@ -193,7 +197,7 @@ public interface ClientProtocol {
   HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
       boolean createParent, short replication, long blockSize,
-      CryptoProtocolVersion[] supportedVersions)
+      CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
       throws IOException;
 
   /**

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -285,7 +285,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
       boolean createParent, short replication, long blockSize,
-      CryptoProtocolVersion[] supportedVersions)
+      CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
       throws IOException {
     CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
         .setSrc(src)
@@ -295,6 +295,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setCreateParent(createParent)
         .setReplication(replication)
         .setBlockSize(blockSize);
+    if (ecPolicyName != null) {
+      builder.setEcPolicyName(ecPolicyName);
+    }
     FsPermission unmasked = masked.getUnmasked();
     if (unmasked != null) {
       builder.setUnmasked(PBHelperClient.convert(unmasked));

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto

@@ -80,6 +80,7 @@ message CreateRequestProto {
   required uint64 blockSize = 7;
   repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
   optional FsPermissionProto unmasked = 9;
+  optional string ecPolicyName = 10;
 }
 
 message CreateResponseProto {

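Because ecPolicyName is an optional proto2 field, the change stays wire-compatible with older clients. A small sketch of that inference (an assumption from protobuf semantics, not code from this commit):

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;

    final class EcPolicyWireCompat {
      // An old client never sets field 10; the generated getter then returns
      // the proto2 default "", and the StringUtils.isEmpty() check added in
      // FSDirWriteFileOp falls back to the inherited directory policy.
      static boolean requestsExplicitPolicy(CreateRequestProto req) {
        return !req.getEcPolicyName().isEmpty();
      }
    }
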
+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -424,7 +424,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           PBHelperClient.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
           (short) req.getReplication(), req.getBlockSize(),
           PBHelperClient.convertCryptoProtocolVersions(
-              req.getCryptoProtocolVersionList()));
+              req.getCryptoProtocolVersionList()),
+          req.getEcPolicyName());
 
       if (result != null) {
         return CreateResponseProto.newBuilder().setFs(PBHelperClient.convert(result))

+ 35 - 19
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -58,6 +58,39 @@ final class FSDirErasureCodingOp {
    */
   private FSDirErasureCodingOp() {}
 
+  /**
+   * Check whether the given ecPolicyName is valid and enabled, and return
+   * the corresponding EC policy if it is.
+   * @param fsn namespace
+   * @param ecPolicyName name of EC policy to be checked
+   * @return an erasure coding policy if ecPolicyName is valid and enabled
+   * @throws IOException
+   */
+  static ErasureCodingPolicy getErasureCodingPolicyByName(
+      final FSNamesystem fsn, final String ecPolicyName) throws IOException {
+    assert fsn.hasReadLock();
+    ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
+        .getEnabledPolicyByName(ecPolicyName);
+    if (ecPolicy == null) {
+      final String sysPolicies =
+          Arrays.asList(
+              fsn.getErasureCodingPolicyManager().getEnabledPolicies())
+              .stream()
+              .map(ErasureCodingPolicy::getName)
+              .collect(Collectors.joining(", "));
+      final String message = String.format("Policy '%s' does not match any " +
+              "enabled erasure" +
+              " coding policies: [%s]. The set of enabled erasure coding " +
+              "policies can be configured at '%s'.",
+          ecPolicyName,
+          sysPolicies,
+          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY
+      );
+      throw new HadoopIllegalArgumentException(message);
+    }
+    return ecPolicy;
+  }
+
   /**
    * Set an erasure coding policy on the given path.
    *
@@ -84,25 +117,8 @@ final class FSDirErasureCodingOp {
     List<XAttr> xAttrs;
     fsd.writeLock();
     try {
-      ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
-          .getEnabledPolicyByName(ecPolicyName);
-      if (ecPolicy == null) {
-        final String sysPolicies =
-            Arrays.asList(
-                fsn.getErasureCodingPolicyManager().getEnabledPolicies())
-                .stream()
-                .map(ErasureCodingPolicy::getName)
-                .collect(Collectors.joining(", "));
-        final String message = String.format("Policy '%s' does not match any " +
-            "enabled erasure" +
-            " coding policies: [%s]. The set of enabled erasure coding " +
-            "policies can be configured at '%s'.",
-            ecPolicyName,
-            sysPolicies,
-            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY
-            );
-        throw new HadoopIllegalArgumentException(message);
-      }
+      ErasureCodingPolicy ecPolicy = getErasureCodingPolicyByName(fsn,
+          ecPolicyName);
       iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
       // Write access is required to set erasure coding policy
       if (fsd.isPermissionEnabled()) {

+ 12 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CreateFlag;
@@ -351,7 +352,7 @@ class FSDirWriteFileOp {
       EnumSet<CreateFlag> flag, boolean createParent,
       short replication, long blockSize,
       FileEncryptionInfo feInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
-      boolean logRetryEntry)
+      String ecPolicyName, boolean logRetryEntry)
       throws IOException {
     assert fsn.hasWriteLock();
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
@@ -385,7 +386,7 @@ class FSDirWriteFileOp {
         FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
     if (parent != null) {
       iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
-                    replication, blockSize, holder, clientMachine);
+          replication, blockSize, holder, clientMachine, ecPolicyName);
       newNode = iip != null ? iip.getLastINode().asFile() : null;
     }
     if (newNode == null) {
@@ -521,7 +522,7 @@ class FSDirWriteFileOp {
   private static INodesInPath addFile(
       FSDirectory fsd, INodesInPath existing, byte[] localName,
       PermissionStatus permissions, short replication, long preferredBlockSize,
-      String clientName, String clientMachine)
+      String clientName, String clientMachine, String ecPolicyName)
       throws IOException {
 
     Preconditions.checkNotNull(existing);
@@ -530,8 +531,14 @@ class FSDirWriteFileOp {
     fsd.writeLock();
     try {
       boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
+      ErasureCodingPolicy ecPolicy;
+      if (!StringUtils.isEmpty(ecPolicyName)) {
+        ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
+            fsd.getFSNamesystem(), ecPolicyName);
+      } else {
+        ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
+            fsd.getFSNamesystem(), existing);
+      }
       if (ecPolicy != null) {
         isStriped = true;
       }

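The selection logic added to addFile() boils down to the following precedence, shown here as a distilled sketch rather than the actual method (it would have to live in the namenode package, since the types involved are package-private): an explicit, validated name wins; otherwise the policy is inherited from the nearest ancestor.

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.IOException;

    import org.apache.commons.lang.StringUtils;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    final class EcPolicyResolutionSketch {
      static ErasureCodingPolicy resolve(FSDirectory fsd,
          INodesInPath existing, String ecPolicyName) throws IOException {
        if (!StringUtils.isEmpty(ecPolicyName)) {
          // Explicit request: validate against the enabled policies; throws
          // HadoopIllegalArgumentException when the name does not match.
          return FSDirErasureCodingOp.getErasureCodingPolicyByName(
              fsd.getFSNamesystem(), ecPolicyName);
        }
        // No explicit request (null from new clients, "" over the wire from
        // old ones): inherit from the nearest ancestor; a null result means
        // the file is contiguously replicated rather than striped.
        return FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
            fsd.getFSNamesystem(), existing);
      }
    }
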
+ 7 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2180,14 +2180,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   HdfsFileStatus startFile(String src, PermissionStatus permissions,
       String holder, String clientMachine, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize, 
-      CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
-      throws IOException {
+      boolean createParent, short replication, long blockSize,
+      CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
+      boolean logRetryCache) throws IOException {
 
     HdfsFileStatus status;
     try {
       status = startFileInt(src, permissions, holder, clientMachine, flag,
-          createParent, replication, blockSize, supportedVersions,
+          createParent, replication, blockSize, supportedVersions, ecPolicyName,
           logRetryCache);
     } catch (AccessControlException e) {
       logAuditEvent(false, "create", src);
@@ -2201,8 +2201,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       PermissionStatus permissions, String holder, String clientMachine,
       EnumSet<CreateFlag> flag, boolean createParent, short replication,
       long blockSize, CryptoProtocolVersion[] supportedVersions,
-      boolean logRetryCache)
-      throws IOException {
+      String ecPolicyName, boolean logRetryCache) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       StringBuilder builder = new StringBuilder();
       builder.append("DIR* NameSystem.startFile: src=").append(src)
@@ -2270,9 +2269,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       dir.writeLock();
       try {
         stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
-                                          clientMachine, flag, createParent,
-                                          replication, blockSize, feInfo,
-                                          toRemoveBlocks, logRetryCache);
+            clientMachine, flag, createParent, replication, blockSize, feInfo,
+            toRemoveBlocks, ecPolicyName, logRetryCache);
       } catch (IOException e) {
         skipSync = e instanceof StandbyException;
         throw e;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -722,8 +722,8 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   @Override // ClientProtocol
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize, 
-      CryptoProtocolVersion[] supportedVersions)
+      boolean createParent, short replication, long blockSize,
+      CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
       throws IOException {
     checkNNStartup();
     String clientMachine = getClientMachine();
@@ -747,7 +747,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
           .getShortUserName(), null, masked);
       status = namesystem.startFile(src, perm, clientName, clientMachine,
           flag.get(), createParent, replication, blockSize, supportedVersions,
-          cacheEntry != null);
+          ecPolicyName, cacheEntry != null);
     } finally {
       RetryCache.setState(cacheEntry, status != null, status);
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1966,7 +1966,7 @@ public class DFSTestUtil {
         .create(file.toString(), new FsPermission((short)0755),
         dfs.getClient().getClientName(),
         new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)),
-        false, (short)1, 128*1024*1024L, null);
+        false, (short)1, 128*1024*1024L, null, null);
 
     FSNamesystem ns = cluster.getNamesystem();
     FSDirectory fsdir = ns.getFSDirectory();

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -269,7 +269,8 @@ public class TestDFSClientRetries {
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
+            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
+            anyObject());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -899,7 +899,8 @@ public class TestEncryptionZones {
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
+            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
+            anyObject());
   }
 
   // This test only uses mocks. Called from the end of an existing test to

+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -519,4 +520,48 @@ public class TestErasureCodingPolicies {
     noadmin.getErasureCodingPolicies();
     superadmin.getErasureCodingPolicies();
   }
+
+  /**
+   * Test applying a specific erasure coding policy to a single file.
+   * Usually a file's policy is inherited from its parent.
+   */
+  @Test
+  public void testFileLevelECPolicy() throws Exception {
+    final Path dirPath = new Path("/striped");
+    final Path filePath0 = new Path(dirPath, "file0");
+    final Path filePath1 = new Path(dirPath, "file1");
+
+    fs.mkdirs(dirPath);
+    fs.setErasureCodingPolicy(dirPath, EC_POLICY.getName());
+
+    // A null EC policy name means inheriting the parent directory's policy
+    fs.newFSDataOutputStreamBuilder(filePath0).build().close();
+    ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(filePath0);
+    assertEquals(EC_POLICY, ecPolicyOnFile);
+
+    // Test illegal EC policy name
+    final String illegalPolicyName = "RS-DEFAULT-1-2-64k";
+    try {
+      fs.newFSDataOutputStreamBuilder(filePath1)
+          .setEcPolicyName(illegalPolicyName).build().close();
+      Assert.fail("illegal erasure coding policy should not be found");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains("Policy '" + illegalPolicyName
+          + "' does not match any enabled erasure coding policies", e);
+    }
+    fs.delete(dirPath, true);
+
+    // Test create a file with a different EC policy than its parent directory
+    fs.mkdirs(dirPath);
+    final ErasureCodingPolicy ecPolicyOnDir =
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    ecPolicyOnFile = EC_POLICY;
+    fs.setErasureCodingPolicy(dirPath, ecPolicyOnDir.getName());
+    fs.newFSDataOutputStreamBuilder(filePath0)
+        .setEcPolicyName(ecPolicyOnFile.getName()).build().close();
+    assertEquals(ecPolicyOnFile, fs.getErasureCodingPolicy(filePath0));
+    assertEquals(ecPolicyOnDir, fs.getErasureCodingPolicy(dirPath));
+    fs.delete(dirPath, true);
+  }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -1143,7 +1143,7 @@ public class TestFileCreation {
           try {
             nnrpc.create(pathStr, new FsPermission((short)0755), "client",
                 new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-                true, (short)1, 128*1024*1024L, null);
+                true, (short)1, 128*1024*1024L, null, null);
             fail("Should have thrown exception when creating '"
                 + pathStr + "'" + " by " + method);
           } catch (InvalidPathException ipe) {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java

@@ -363,7 +363,8 @@ public class TestLease {
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
+            anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
+            anyObject());
 
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);

+ 9 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -587,14 +587,16 @@ public class NNThroughputBenchmark implements Tool {
     throws IOException {
       long start = Time.now();
       // dummyActionNoSynch(fileIdx);
-      clientProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
-                      clientName, new EnumSetWritable<CreateFlag>(EnumSet
-              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, 
-          replication, BLOCK_SIZE, CryptoProtocolVersion.supported());
+      clientProto.create(fileNames[daemonId][inputIdx],
+          FsPermission.getDefault(), clientName,
+          new EnumSetWritable<CreateFlag>(EnumSet
+              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
+          replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
       long end = Time.now();
-      for(boolean written = !closeUponCreate; !written; 
+      for (boolean written = !closeUponCreate; !written;
         written = clientProto.complete(fileNames[daemonId][inputIdx],
-                                    clientName, null, HdfsConstants.GRANDFATHER_INODE_ID));
+            clientName, null, HdfsConstants.GRANDFATHER_INODE_ID)) {
+      }
       return end-start;
     }
 
@@ -1139,7 +1141,7 @@ public class NNThroughputBenchmark implements Tool {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         clientProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE, CryptoProtocolVersion.supported());
+            BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
       }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -85,7 +85,7 @@ public class TestAddBlockRetry {
     nn.create(src, FsPermission.getFileDefault(),
         "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-        true, (short)3, 1024, null);
+        true, (short)3, 1024, null, null);
 
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
@@ -157,7 +157,7 @@ public class TestAddBlockRetry {
     // create file
     nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
-        (short) 3, 1024, null);
+        (short) 3, 1024, null, null);
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -109,7 +109,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
         // Create the file with client machine
         HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
             clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
-            replication, DEFAULT_BLOCK_SIZE, null, false);
+            replication, DEFAULT_BLOCK_SIZE, null, null, false);
 
         //test chooseTarget for new file
         LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
@@ -139,7 +139,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
     // Create the file with client machine
     HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
         clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
-        (short) 20, DEFAULT_BLOCK_SIZE, null, false);
+        (short) 20, DEFAULT_BLOCK_SIZE, null, null, false);
 
     //test chooseTarget for new file
     LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java

@@ -138,7 +138,7 @@ public class TestDefaultBlockPlacementPolicy {
       // Create the file with client machine
       HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
           clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
-          REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
+          REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, null, false);
       LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
           null, null, fileStatus.getFileId(), null, null);
 

+ 11 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java

@@ -223,15 +223,20 @@ public class TestNamenodeRetryCache {
     // Two retried calls succeed
     newCall();
     HdfsFileStatus status = nnRpc.create(src, perm, "holder",
-      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
-      (short) 1, BlockSize, null);
-    Assert.assertEquals(status, nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null));
-    Assert.assertEquals(status, nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null));
-    
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 1, BlockSize, null, null);
+    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 1, BlockSize, null, null));
+    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 1, BlockSize, null, null));
     // A non-retried call fails
     newCall();
     try {
-      nnRpc.create(src, perm, "holder", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, BlockSize, null);
+      nnRpc.create(src, perm, "holder",
+          new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
+          true, (short) 1, BlockSize, null, null);
       Assert.fail("testCreate - expected exception is not thrown");
     } catch (IOException e) {
       // expected

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -405,7 +405,8 @@ public class TestRetryCacheWithHA {
           FsPermission.getFileDefault(), client.getClientName(),
           new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
           BlockSize,
-          new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES});
+          new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES},
+          null);
     }
 
     @Override